Diffstat (limited to 'collections-debian-merged/ansible_collections/community/aws/tests/integration')
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/constraints.txt  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/requirements.txt  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/defaults/main.yml  40
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/meta/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/full_acm_test.yml  401
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/main.yml  39
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/tasks/main.yml  207
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2  33
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/defaults/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/tasks/main.yml  122
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/vars/main.yml  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/tasks/main.yml  134
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/defaults/main.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/tasks/main.yml  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/aliases  8
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/defaults/main.yaml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/files/config-trust-policy.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/tasks/main.yaml  405
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/templates/config-s3-policy.json.j2  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/defaults/main.yml  33
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/files/eks-trust-policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/meta/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/full_test.yml  247
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/main.yml  66
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/tasks/main.yml  87
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/tasks/main.yml  96
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/tasks/main.yml  424
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy-no-key-rotation.j2  81
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy.j2  72
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml  107
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/defaults/main.yaml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/hello_world.zip  bin 0 -> 401 bytes
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/secretsmanager-trust-policy.json  19
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/tasks/main.yaml  253
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/defaults/main.yaml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/meta/main.yaml  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/main.yaml  648
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/defaults/main.yaml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/tasks/main.yaml  334
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/templates/policy.json.j2  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/defaults/main.yaml  9
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml  349
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml  187
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/main.yaml  36
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml  26
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml  126
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml  132
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/defaults/main.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machine.json  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/tasks/main.yml  303
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/aliases  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/tasks/main.yml  1201
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/defaults/main.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/files/test_stack.yml  24
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml  41
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml  9
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/playbooks/full_test.yml  6
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/runme.sh  8
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml  190
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/defaults/main.yml  49
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/meta/main.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml  442
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/defaults/main.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/tasks/main.yml  1425
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2  17
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2  34
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2  34
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2  34
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml  157
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/aliases  1
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test.sh  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml  43
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aliases  7
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md  43
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml  19
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md  43
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml  85
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/inventory.aws_ssm.template  10
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/runme.sh  42
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml  139
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml  192
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/defaults/main.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/tasks/main.yml  787
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/vars/main.yml  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/defaults/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/tasks/main.yml  727
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/inventory  18
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/main.yml  43
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/meta/main.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml  14
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml  201
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml  86
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml  57
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml  41
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml  95
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml  129
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml  127
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml  68
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml  55
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/security_group.yml  81
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml  158
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml  184
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml  29
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml  30
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/runme.sh  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/full_test.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml  38
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml  24
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml  208
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/version_fail.yml  36
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/runme.sh  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/defaults/main.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml  94
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_setup.yml  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/main.yml  232
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/vars/main.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/tasks/main.yml  215
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml  175
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml  116
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml  429
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/aliases  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml  162
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml  178
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml  172
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml  174
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml  140
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/tags.yml  117
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml  761
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml  174
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/tasks/main.yml  127
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/aliases  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/defaults/main.yml  38
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ec2-trust-policy.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ecs-trust-policy.json  16
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/full_test.yml  1172
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml  53
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml  123
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_fail.yml  216
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml  124
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/defaults/main.yml  22
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/tasks/main.yml  541
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml  322
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/full_test.yml  9
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/roles/efs/tasks/main.yml  331
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/version_fail.yml  32
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/runme.sh  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/defaults/main.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/meta/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/full_test.yml  283
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/main.yml  44
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml  277
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml  71
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml  93
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml  89
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml  52
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml  52
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml  240
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml  467
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml  53
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml  425
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/vars/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/defaults/main.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/cert.pem  32
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/key.pem  52
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml  252
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml  50
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml  88
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml  72
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml  101
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml  90
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/full_test.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json  8
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml  110
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml  484
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/version_fail.yml  41
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/runme.sh  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/full_test.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml  507
-rwxr-xr-x  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/runme.sh  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml  127
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml  107
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/defaults/main.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access.json  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json  11
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json  11
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_trust.json  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/main.yml  99
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/object.yml  1065
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml  1521
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example1.xml  22
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example2.xml  22
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml  89
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/defaults/main.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/tasks/main.yml  482
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/defaults/main.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml  713
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/test_encryption.yml  262
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/defaults/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/mini_lambda.py  48
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/tasks/main.yml  433
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/defaults/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py  40
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/tasks/main.yml  176
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2  39
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml  122
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/prepare_tests/tasks/main.yml  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/defaults/main.yml  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/credential_tests.yml  36
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/main.yml  29
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_aurora.yml  144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_bad_options.yml  41
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_encryption.yml  53
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_final_snapshot.yml  75
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_modification.yml  195
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_processor_features.yml  126
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_read_replica.yml  162
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_restore_instance.yml  95
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_snapshot.yml  85
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_states.yml  277
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_tags.yml  265
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml  169
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/defaults/main.yml  30
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/tasks/main.yml  321
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml  115
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml  30
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml  221
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/defaults/main.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml  357
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/tasks/main.yml  252
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/vars/main.yml  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/tasks/main.yml  396
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/defaults/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/lambda-trust-policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py  13
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml  326
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml  435
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/aliases  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/defaults/main.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml  205
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml  243
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/aliases  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test1.txt  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test2.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test3.json  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/ec2.sh  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/inventory_diff.py  67
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/runme.sh  151
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/tasks/common.yml  119
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/vars/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml  5
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/defaults/main.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml  50
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/defaults/main.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/lambda-trust-policy.json  12
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py  9
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml  303
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/initial-policy.json  20
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/updated-policy.json  20
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/defaults/main.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml  106
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases  2
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml  384
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2  12
430 files changed, 36673 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/constraints.txt b/collections-debian-merged/ansible_collections/community/aws/tests/integration/constraints.txt
new file mode 100644
index 00000000..c105f290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/constraints.txt
@@ -0,0 +1,3 @@
+boto3 >= 1.9.250, <= 1.15.18 # minimum version that supports botocore 1.13.3, max that will work with ansible 2.9's other constraints
+botocore<1.19.0,>=1.13.3 # adds support for ECR image scanning
+
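Editor's note: a constraints file only pins versions; it installs nothing by itself. A minimal sketch of how it pairs with the requirements file below, using Ansible's pip module (the paths are hypothetical):

    - name: install integration test dependencies under the version pins (sketch)
      pip:
        requirements: /path/to/tests/integration/requirements.txt
        extra_args: "-c /path/to/tests/integration/constraints.txt"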
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/requirements.txt b/collections-debian-merged/ansible_collections/community/aws/tests/integration/requirements.txt
new file mode 100644
index 00000000..2fb8f547
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/requirements.txt
@@ -0,0 +1,5 @@
+# netaddr is needed for ansible.netcommon.ipv6
+netaddr
+virtualenv
+boto3
+botocore
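Editor's note: the comment above is the only hint at why netaddr is listed; it backs the ipaddr-family filters. A small usage sketch of the ansible.netcommon.ipv6 filter it names (assuming netaddr is importable by the controller's Python; applied to a list, the filter keeps the valid IPv6 entries):

    - name: keep only the valid IPv6 addresses from a list (sketch)
      debug:
        msg: "{{ ['192.0.2.1', '::1', 'fe80::1'] | ansible.netcommon.ipv6 }}"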
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/__init__.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/aliases
new file mode 100644
index 00000000..c5a973f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+aws_acm_info
+shippable/aws/group2
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/defaults/main.yml
new file mode 100644
index 00000000..5d3648f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/defaults/main.yml
@@ -0,0 +1,40 @@
+---
+# we'll generate 3 certificates locally for the test
+# Upload the first
+# overwrite it with the second
+# and the third is unrelated, to check we only get info about the first when we want
+local_certs:
+ - priv_key: "{{ remote_tmp_dir }}/private-1.pem"
+ cert: "{{ remote_tmp_dir }}/public-1.pem"
+ csr: "{{ remote_tmp_dir }}/csr-1.csr"
+ domain: "acm1.{{ aws_acm_test_uuid }}.ansible.com"
+ name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_1"
+
+ - priv_key: "{{ remote_tmp_dir }}/private-2.pem"
+ cert: "{{ remote_tmp_dir }}/public-2.pem"
+ csr: "{{ remote_tmp_dir }}/csr-2.csr"
+ domain: "acm2.{{ aws_acm_test_uuid }}.ansible.com"
+ name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_2"
+
+ - priv_key: "{{ remote_tmp_dir }}/private-3.pem"
+ cert: "{{ remote_tmp_dir }}/public-3.pem"
+ csr: "{{ remote_tmp_dir }}/csr-3.csr"
+ domain: "acm3.{{ aws_acm_test_uuid }}.ansible.com"
+ name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_3"
+
+# we'll have one private key
+# make 2 chains using it
+# so we can test what happens when you change just the chain
+# not the domain or key
+chained_cert:
+ priv_key: "{{ remote_tmp_dir }}/private-ch-0.pem"
+ domain: "acm-ch.{{ aws_acm_test_uuid }}.ansible.com"
+ name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_4"
+ chains:
+ - cert: "{{ remote_tmp_dir }}/public-ch-0.pem"
+ csr: "{{ remote_tmp_dir }}/csr-ch-0.csr"
+ ca: 0 # index into local_certs
+ - cert: "{{ remote_tmp_dir }}/public-ch-1.pem"
+ csr: "{{ remote_tmp_dir }}/csr-ch-1.csr"
+ ca: 1 # index into local_certs
+ 
\ No newline at end of file
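Editor's note: the ca field is an index back into local_certs; full_acm_test.yml dereferences it when signing each chain, abridged here from that file:

    - name: Sign new certs with cert 0 and 1 (abridged from full_acm_test.yml)
      with_items: '{{ chained_cert.chains }}'
      community.crypto.openssl_certificate:
        provider: ownca
        path: '{{ item.cert }}'
        csr_path: '{{ item.csr }}'
        ownca_path: '{{ local_certs[item.ca].cert }}'
        ownca_privatekey_path: '{{ local_certs[item.ca].priv_key }}'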
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/full_acm_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/full_acm_test.yml
new file mode 100644
index 00000000..88c24356
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/full_acm_test.yml
@@ -0,0 +1,401 @@
+- name: AWS ACM integration test
+ module_defaults:
+ group/aws:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+ - name: list certs
+ aws_acm_info: null
+ register: list_all
+ failed_when: list_all.certificates is not defined
+ - name: ensure absent cert which doesn't exist - first time
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ state: absent
+ with_items: '{{ local_certs }}'
+ - name: ensure absent cert which doesn't exist - second time
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ state: absent
+ with_items: '{{ local_certs }}'
+ register: absent_start_two
+ failed_when: absent_start_two.changed
+ - name: list cert which shouldn't exist
+ aws_acm_info:
+ tags:
+ Name: '{{ item.name }}'
+ register: list_tag
+ with_items: '{{ local_certs }}'
+ failed_when: list_tag.certificates | length > 0
+ - name: check directory was made
+ assert:
+ that:
+ - remote_tmp_dir is defined
+ - name: Generate private key for local certs
+ with_items: '{{ local_certs }}'
+ community.crypto.openssl_privatekey:
+ path: '{{ item.priv_key }}'
+ type: RSA
+ size: 2048
+ - name: Generate an OpenSSL Certificate Signing Request for own certs
+ with_items: '{{ local_certs }}'
+ community.crypto.openssl_csr:
+ path: '{{ item.csr }}'
+ privatekey_path: '{{ item.priv_key }}'
+ common_name: '{{ item.domain }}'
+ - name: Generate a Self Signed OpenSSL certificate for own certs
+ with_items: '{{ local_certs }}'
+ community.crypto.openssl_certificate:
+ provider: selfsigned
+ path: '{{ item.cert }}'
+ csr_path: '{{ item.csr }}'
+ privatekey_path: '{{ item.priv_key }}'
+ signature_algorithms:
+ - sha256WithRSAEncryption
+ - name: upload certificates first time
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ certificate: '{{ lookup(''file'', item.cert ) }}'
+ private_key: '{{ lookup(''file'', item.priv_key ) }}'
+ state: present
+ register: upload
+ with_items: '{{ local_certs }}'
+ until: upload is succeeded
+ retries: 5
+ delay: 10
+ - assert:
+ that:
+ - prev_task.certificate.arn is defined
+ - ('arn:aws:acm:123' | regex_search( 'arn:aws:acm:' )) is defined
+ - (prev_task.certificate.arn | regex_search( 'arn:aws:acm:' )) is defined
+ - prev_task.certificate.domain_name == original_cert.domain
+ - prev_task.changed
+ with_items: '{{ upload.results }}'
+ vars:
+ original_cert: '{{ item.item }}'
+ prev_task: '{{ item }}'
+ - name: fetch data about cert just uploaded, by ARN
+ aws_acm_info:
+ certificate_arn: '{{ item.certificate.arn }}'
+ register: fetch_after_up
+ with_items: '{{ upload.results }}'
+ - name: check output of prior task (fetch data about cert just uploaded, by ARN)
+ assert:
+ that:
+ - fetch_after_up_result.certificates | length == 1
+ - fetch_after_up_result.certificates[0].certificate_arn == upload_result.certificate.arn
+ - fetch_after_up_result.certificates[0].domain_name == original_cert.domain
+ - (fetch_after_up_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup( 'file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', '' ))
+ - '''Name'' in fetch_after_up_result.certificates[0].tags'
+ - fetch_after_up_result.certificates[0].tags['Name'] == original_cert.name
+ with_items: '{{ fetch_after_up.results }}'
+ vars:
+ fetch_after_up_result: '{{ item }}'
+ upload_result: '{{ item.item }}'
+ original_cert: '{{ item.item.item }}'
+ - name: fetch data about cert just uploaded, by name
+ aws_acm_info:
+ tags:
+ Name: '{{ original_cert.name }}'
+ register: fetch_after_up_name
+ with_items: '{{ upload.results }}'
+ vars:
+ upload_result: '{{ item }}'
+ original_cert: '{{ item.item }}'
+ - name: check fetched data of cert we just uploaded
+ assert:
+ that:
+ - fetch_after_up_name_result.certificates | length == 1
+ - fetch_after_up_name_result.certificates[0].certificate_arn == upload_result.certificate.arn
+ - fetch_after_up_name_result.certificates[0].domain_name == original_cert.domain
+ - (fetch_after_up_name_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', ''))
+ - '''Name'' in fetch_after_up_name_result.certificates[0].tags'
+ - fetch_after_up_name_result.certificates[0].tags['Name'] == original_cert.name
+ with_items: '{{ fetch_after_up_name.results }}'
+ vars:
+ fetch_after_up_name_result: '{{ item }}'
+ upload_result: '{{ item.item }}'
+ original_cert: '{{ item.item.item }}'
+ - name: fetch data about cert just uploaded, by domain name
+ aws_acm_info:
+ domain_name: '{{ original_cert.domain }}'
+ register: fetch_after_up_domain
+ with_items: '{{ upload.results }}'
+ vars:
+ original_cert: '{{ item.item }}'
+ - name: compare fetched data of cert just uploaded to upload task
+ assert:
+ that:
+ - fetch_after_up_domain_result.certificates | length == 1
+ - fetch_after_up_domain_result.certificates[0].certificate_arn == upload_result.certificate.arn
+ - fetch_after_up_domain_result.certificates[0].domain_name == original_cert.domain
+ - (fetch_after_up_domain_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', ''))
+ - '''Name'' in fetch_after_up_domain_result.certificates[0].tags'
+ - fetch_after_up_domain_result.certificates[0].tags['Name'] == original_cert.name
+ with_items: '{{ fetch_after_up_domain.results }}'
+ vars:
+ fetch_after_up_domain_result: '{{ item }}'
+ upload_result: '{{ item.item }}'
+ original_cert: '{{ item.item.item }}'
+ - name: upload certificates again, check not changed
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ certificate: '{{ lookup(''file'', item.cert ) }}'
+ private_key: '{{ lookup(''file'', item.priv_key ) }}'
+ state: present
+ register: upload2
+ with_items: '{{ local_certs }}'
+ failed_when: upload2.changed
+ - name: update first cert with body of the second, first time
+ aws_acm:
+ state: present
+ name_tag: '{{ local_certs[0].name }}'
+ certificate: '{{ lookup(''file'', local_certs[1].cert ) }}'
+ private_key: '{{ lookup(''file'', local_certs[1].priv_key ) }}'
+ register: overwrite
+ - name: check output of previous task (update first cert with body of the second, first time)
+ assert:
+ that:
+ - overwrite.certificate.arn is defined
+ - overwrite.certificate.arn | regex_search( 'arn:aws:acm:' ) is defined
+ - overwrite.certificate.arn == upload.results[0].certificate.arn
+ - overwrite.certificate.domain_name == local_certs[1].domain
+ - overwrite.changed
+ - name: check update was successful
+ aws_acm_info:
+ tags:
+ Name: '{{ local_certs[0].name }}'
+ register: fetch_after_overwrite
+ - name: check output of update fetch
+ assert:
+ that:
+ - fetch_after_overwrite.certificates | length == 1
+ - fetch_after_overwrite.certificates[0].certificate_arn == fetch_after_up.results[0].certificates[0].certificate_arn
+ - fetch_after_overwrite.certificates[0].domain_name == local_certs[1].domain
+ - (fetch_after_overwrite.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', local_certs[1].cert )| replace( ' ', '' ) | replace( '\n', ''))
+ - '''Name'' in fetch_after_overwrite.certificates[0].tags'
+ - fetch_after_overwrite.certificates[0].tags['Name'] == local_certs[0].name
+ - name: fetch other cert
+ aws_acm_info:
+ tags:
+ Name: '{{ local_certs[1].name }}'
+ register: check_after_overwrite
+ - name: check other cert unaffected
+ assert:
+ that:
+ - check_after_overwrite.certificates | length == 1
+ - check_after_overwrite.certificates[0].certificate_arn == fetch_after_up.results[1].certificates[0].certificate_arn
+ - check_after_overwrite.certificates[0].domain_name == local_certs[1].domain
+ - (check_after_overwrite.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', local_certs[1].cert ) | replace( ' ', '' ) | replace( '\n', ''))
+ - '''Name'' in check_after_overwrite.certificates[0].tags'
+ - check_after_overwrite.certificates[0].tags['Name'] == local_certs[1].name
+ - name: update first cert with body of the second again
+ aws_acm:
+ state: present
+ name_tag: '{{ local_certs[0].name }}'
+ certificate: '{{ lookup(''file'', local_certs[1].cert ) }}'
+ private_key: '{{ lookup(''file'', local_certs[1].priv_key ) }}'
+ register: overwrite2
+ - name: check output of previous task (update first cert with body of the second again)
+ assert:
+ that:
+ - overwrite2.certificate.arn is defined
+ - overwrite2.certificate.arn | regex_search( 'arn:aws:acm:' ) is defined
+ - overwrite2.certificate.arn == upload.results[0].certificate.arn
+ - overwrite2.certificate.domain_name == local_certs[1].domain
+ - not overwrite2.changed
+ - name: delete certs 1 and 2
+ aws_acm:
+ state: absent
+ domain_name: '{{ local_certs[1].domain }}'
+ register: delete_both
+ - name: test prev task
+ assert:
+ that:
+ - delete_both.arns is defined
+ - check_after_overwrite.certificates[0].certificate_arn in delete_both.arns
+ - upload.results[0].certificate.arn in delete_both.arns
+ - delete_both.changed
+ - name: fetch info for certs 1 and 2
+ aws_acm_info:
+ tags:
+ Name: '{{ local_certs[item].name }}'
+ register: check_del_one
+ with_items:
+ - 0
+ - 1
+ retries: 2
+ until:
+ - check_del_one is not failed
+ - check_del_one.certificates | length == 0
+ delay: 10
+ - name: check certs 1 and 2 were already deleted
+ with_items: '{{ check_del_one.results }}'
+ assert:
+ that: item.certificates | length == 0
+ - name: check cert 3 not deleted
+ aws_acm_info:
+ tags:
+ Name: '{{ local_certs[2].name }}'
+ register: check_del_one_remain
+ failed_when: check_del_one_remain.certificates | length != 1
+ - name: delete cert 3
+ aws_acm:
+ state: absent
+ domain_name: '{{ local_certs[2].domain }}'
+ register: delete_third
+ - name: check cert 3 deletion went as expected
+ assert:
+ that:
+ - delete_third.arns is defined
+ - delete_third.arns | length == 1
+ - delete_third.arns[0] == upload.results[2].certificate.arn
+ - delete_third.changed
+ - name: check cert 3 was deleted
+ aws_acm_info:
+ tags:
+ Name: '{{ local_certs[2].name }}'
+ register: check_del_three
+ failed_when: check_del_three.certificates | length != 0
+ - name: delete cert 3 again
+ aws_acm:
+ state: absent
+ domain_name: '{{ local_certs[2].domain }}'
+ register: delete_third
+ - name: check deletion of cert 3 not changed, because already deleted
+ assert:
+ that:
+ - delete_third.arns is defined
+ - delete_third.arns | length == 0
+ - not delete_third.changed
+ - name: check directory was made
+ assert:
+ that:
+ - remote_tmp_dir is defined
+ - name: Generate private key for cert to be chained
+ community.crypto.openssl_privatekey:
+ path: '{{ chained_cert.priv_key }}'
+ type: RSA
+ size: 2048
+ - name: Generate two OpenSSL Certificate Signing Requests for cert to be chained
+ with_items: '{{ chained_cert.chains }}'
+ community.crypto.openssl_csr:
+ path: '{{ item.csr }}'
+ privatekey_path: '{{ chained_cert.priv_key }}'
+ common_name: '{{ chained_cert.domain }}'
+ - name: Sign new certs with cert 0 and 1
+ with_items: '{{ chained_cert.chains }}'
+ community.crypto.openssl_certificate:
+ provider: ownca
+ path: '{{ item.cert }}'
+ csr_path: '{{ item.csr }}'
+ ownca_path: '{{ local_certs[item.ca].cert }}'
+ ownca_privatekey_path: '{{ local_certs[item.ca].priv_key }}'
+ signature_algorithms:
+ - sha256WithRSAEncryption
+ - name: check files exist (for next task)
+ file:
+ path: '{{ item }}'
+ state: file
+ with_items:
+ - '{{ local_certs[chained_cert.chains[0].ca].cert }}'
+ - '{{ local_certs[chained_cert.chains[1].ca].cert }}'
+ - '{{ chained_cert.chains[0].cert }}'
+ - '{{ chained_cert.chains[1].cert }}'
+ - name: Find chains
+ with_items: '{{ chained_cert.chains }}'
+ register: chains
+ community.crypto.certificate_complete_chain:
+ input_chain: '{{ lookup(''file'', item.cert ) }}'
+ root_certificates:
+ - '{{ local_certs[item.ca].cert }}'
+ - name: upload chained cert, first chain, first time
+ aws_acm:
+ name_tag: '{{ chained_cert.name }}'
+ certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}'
+ certificate_chain: '{{ chains.results[0].complete_chain | join(''
+
+ '') }}'
+ private_key: '{{ lookup(''file'', chained_cert.priv_key ) }}'
+ state: present
+ register: upload_chain
+ failed_when: not upload_chain.changed
+ - name: fetch chain of cert we just uploaded
+ aws_acm_info:
+ tags:
+ Name: '{{ chained_cert.name }}'
+ register: check_chain
+ - name: check chain of cert we just uploaded
+ assert:
+ that:
+ - (check_chain.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[0].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') )
+ - (check_chain.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[0].cert ) | replace( ' ', '' ) | replace( '\n', '') )
+ - name: upload chained cert again, check not changed
+ aws_acm:
+ name_tag: '{{ chained_cert.name }}'
+ certificate: '{{ lookup(''file'', chained_cert.chains[0].cert ) }}'
+ certificate_chain: '{{ chains.results[0].complete_chain | join(''
+
+ '') }}'
+ private_key: '{{ lookup(''file'', chained_cert.priv_key ) }}'
+ state: present
+ register: upload_chain_2
+ - name: check previous task not changed
+ assert:
+ that:
+ - upload_chain_2.certificate.arn == upload_chain.certificate.arn
+ - not upload_chain_2.changed
+ - name: upload chained cert, different chain
+ aws_acm:
+ name_tag: '{{ chained_cert.name }}'
+ certificate: '{{ lookup(''file'', chained_cert.chains[1].cert ) }}'
+ certificate_chain: '{{ chains.results[1].complete_chain | join(''
+
+ '') }}'
+ private_key: '{{ lookup(''file'', chained_cert.priv_key ) }}'
+ state: present
+ register: upload_chain_3
+ - name: check uploading with different chain is changed
+ assert:
+ that:
+ - upload_chain_3.changed
+ - upload_chain_3.certificate.arn == upload_chain.certificate.arn
+ - name: fetch info about chain of cert we just updated
+ aws_acm_info:
+ tags:
+ Name: '{{ chained_cert.name }}'
+ register: check_chain_2
+ - name: check chain of cert we just uploaded
+ assert:
+ that:
+ - (check_chain_2.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', '')) == ( chains.results[1].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') )
+ - (check_chain_2.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == ( lookup('file', chained_cert.chains[1].cert ) | replace( ' ', '' ) | replace( '\n', '') )
+ - name: delete chained cert
+ aws_acm:
+ name_tag: '{{ chained_cert.name }}'
+ state: absent
+ register: delete_chain_3
+ - name: check deletion of chained cert 3 is changed
+ assert:
+ that:
+ - delete_chain_3.changed
+ - upload_chain.certificate.arn in delete_chain_3.arns
+ always:
+ - name: delete first bunch of certificates
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ state: absent
+ with_items: '{{ local_certs }}'
+ ignore_errors: true
+ - name: delete chained cert
+ aws_acm:
+ state: absent
+ name_tag: '{{ chained_cert.name }}'
+ ignore_errors: true
+ - name: deleting local directory with test artefacts
+ file:
+ path: '{{ remote_tmp_dir }}'
+ state: absent
+ ignore_errors: true
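Editor's note: many asserts in the file above compare PEM bodies only after stripping spaces and newlines, so wrapping differences between ACM's output and the local file don't cause false failures. The same normalization in isolation, with hypothetical file paths:

    - name: whitespace-insensitive PEM comparison (sketch; paths are hypothetical)
      vars:
        pem_a: "{{ lookup('file', '/tmp/a.pem') | replace(' ', '') | replace('\n', '') }}"
        pem_b: "{{ lookup('file', '/tmp/b.pem') | replace(' ', '') | replace('\n', '') }}"
      assert:
        that:
          - pem_a == pem_b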
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/main.yml
new file mode 100644
index 00000000..cec5e92f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_acm/tasks/main.yml
@@ -0,0 +1,39 @@
+- name: AWS ACM integration test virtualenv wrapper
+ block:
+ - set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+ - set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+ # The CI runs many of these tests in parallel
+ # Use this random ID to differentiate which resources
+ # are from which test
+ - set_fact:
+ aws_acm_test_uuid: "{{ (10**9) | random }}"
+
+ - pip:
+ name: virtualenv
+
+ - pip:
+ name:
+ - 'botocore<1.13.0,>=1.12.211'
+ - boto3
+ - coverage<5
+ - jinja2
+ - pyyaml
+ - 'pyopenssl>=0.15'
+ - 'cryptography>=1.6'
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+ - include_tasks: full_acm_test.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+ always:
+ - file:
+ path: "{{ virtualenv }}"
+ state: absent
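Editor's note: the aws_acm_test_uuid set above comes from Jinja's random filter; the unseeded form re-rolls on every run, which is what differentiates parallel CI runs. A seeded variant would instead be stable for a given prefix, e.g.:

    - name: derive a stable per-prefix test ID (sketch; the test itself uses the unseeded form)
      set_fact:
        aws_acm_test_uuid: "{{ (10**9) | random(seed=resource_prefix) }}"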
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/tasks/main.yml
new file mode 100644
index 00000000..5c6047c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/tasks/main.yml
@@ -0,0 +1,207 @@
+- block:
+
+ # ====================== testing failure cases: ==================================
+
+ - name: test with no parameters
+ aws_api_gateway:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("The aws_api_gateway module requires a region")'
+
+ - name: test with minimal parameters but no region
+ aws_api_gateway:
+ api_id: 'fake-api-doesnt-exist'
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with minimal parameters but no region
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("The aws_api_gateway module requires a region")'
+
+ - name: test for disallowing multiple swagger sources
+ aws_api_gateway:
+ api_id: 'fake-api-doesnt-exist'
+ region: '{{ec2_region}}'
+ swagger_file: foo.yml
+ swagger_text: "this is not really an API"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with multiple swagger sources
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("parameters are mutually exclusive")'
+
+
+ # ====================== regular testing: ===================================
+
+ - name: build API file
+ template:
+ src: minimal-swagger-api.yml.j2
+ dest: "{{output_dir}}/minimal-swagger-api.yml"
+
+ - name: deploy new API
+ aws_api_gateway:
+ api_file: "{{output_dir}}/minimal-swagger-api.yml"
+ stage: "minimal"
+ endpoint_type: 'REGIONAL'
+ state: present
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: create_result
+
+ - name: assert deploy new API worked
+ assert:
+ that:
+ - 'create_result.changed == True'
+ - 'create_result.failed == False'
+ - 'create_result.deploy_response.description == "Automatic deployment by Ansible."'
+ - 'create_result.configure_response.id == create_result.api_id'
+ - '"apigateway:CreateRestApi" in create_result.resource_actions'
+ - 'create_result.configure_response.endpoint_configuration.types.0 == "REGIONAL"'
+
+ - name: check if API endpoint works
+ uri:
+ url: "https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/minimal"
+ register: uri_result
+
+ - name: assert API works success
+ assert:
+ that:
+ - 'uri_result.status == 200'
+
+ - name: check if nonexistent endpoint causes error
+ uri:
+ url: "https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/nominal"
+ register: bad_uri_result
+ ignore_errors: true
+
+ - name: assert nonexistent endpoint fails
+ assert:
+ that:
+ - bad_uri_result is failed
+
+ - name: Update API to test params effect
+ aws_api_gateway:
+ api_id: '{{create_result.api_id}}'
+ api_file: "{{output_dir}}/minimal-swagger-api.yml"
+ cache_enabled: true
+ cache_size: '1.6'
+ tracing_enabled: true
+ state: present
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: update_result
+
+ - name: assert update result
+ assert:
+ that:
+ - 'update_result.changed == True'
+ - 'update_result.failed == False'
+ - '"apigateway:PutRestApi" in update_result.resource_actions'
+
+ # ==== additional create/delete tests ====
+
+ - name: deploy first API
+ aws_api_gateway:
+ api_file: "{{output_dir}}/minimal-swagger-api.yml"
+ stage: "minimal"
+ cache_enabled: false
+ state: present
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: create_result_1
+
+ - name: deploy second API rapidly after first
+ aws_api_gateway:
+ api_file: "{{output_dir}}/minimal-swagger-api.yml"
+ stage: "minimal"
+ state: present
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: create_result_2
+
+ - name: assert both APIs deployed successfully
+ assert:
+ that:
+ - 'create_result_1.changed == True'
+ - 'create_result_2.changed == True'
+ - '"api_id" in create_result_1'
+ - '"api_id" in create_result_1'
+ - 'create_result_1.configure_response.endpoint_configuration.types.0 == "EDGE"'
+
+ - name: destroy first API
+ aws_api_gateway:
+ state: absent
+ api_id: '{{create_result_1.api_id}}'
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: destroy_result_1
+
+ - name: destroy second API rapidly after first
+ aws_api_gateway:
+ state: absent
+ api_id: '{{create_result_2.api_id}}'
+ region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ register: destroy_result_2
+
+ - name: assert both APIs destroyed successfully
+ assert:
+ that:
+ - 'destroy_result_1.changed == True'
+ - 'destroy_result_2.changed == True'
+ - '"apigateway:DeleteRestApi" in destroy_result_1.resource_actions'
+ - '"apigateway:DeleteRestApi" in destroy_result_2.resource_actions'
+
+ # ================= end testing ====================================
+
+ always:
+
+ - name: Ensure cleanup of API deploy
+ aws_api_gateway:
+ state: absent
+ api_id: '{{create_result.api_id}}'
+ ec2_region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ ignore_errors: true
+
+ - name: Ensure cleanup of API deploy 1
+ aws_api_gateway:
+ state: absent
+ api_id: '{{create_result_1.api_id}}'
+ ec2_region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ ignore_errors: true
+
+ - name: Ensure cleanup of API deploy 2
+ aws_api_gateway:
+ state: absent
+ api_id: '{{create_result_2.api_id}}'
+ ec2_region: '{{ec2_region}}'
+ aws_access_key: '{{ec2_access_key}}'
+ aws_secret_key: '{{ec2_secret_key}}'
+ security_token: '{{security_token}}'
+ ignore_errors: true
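Editor's note: unlike the aws_acm target, every task above passes the credential parameters explicitly. The module_defaults pattern used elsewhere in this diff would declare them once for the whole block; a sketch:

    - module_defaults:
        group/aws:
          region: '{{ ec2_region }}'
          aws_access_key: '{{ ec2_access_key }}'
          aws_secret_key: '{{ ec2_secret_key }}'
          security_token: '{{ security_token }}'
      block:
        - name: deploy new API (credentials inherited from module_defaults)
          aws_api_gateway:
            api_file: "{{ output_dir }}/minimal-swagger-api.yml"
            stage: minimal
            state: present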
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2
new file mode 100644
index 00000000..8c5c0581
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2
@@ -0,0 +1,33 @@
+---
+swagger: "2.0"
+info:
+ version: "2017-05-11T12:14:59Z"
+ title: "{{resource_prefix}}Empty_API"
+host: "fakeexample.execute-api.us-east-1.amazonaws.com"
+basePath: "/minimal"
+schemes:
+- "https"
+paths:
+ /:
+ get:
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "200 response"
+ schema:
+ $ref: "#/definitions/Empty"
+ x-amazon-apigateway-integration:
+ responses:
+ default:
+ statusCode: "200"
+ requestTemplates:
+ application/json: "{\"statusCode\": 200}"
+ passthroughBehavior: "when_no_match"
+ type: "mock"
+definitions:
+ Empty:
+ type: "object"
+ title: "Empty Schema"
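Editor's note: the x-amazon-apigateway-integration stanza above makes the stage root a mock that always answers GET with a static 200 and no backend. The tasks file probes it with the uri module; spelled out with an explicit status check:

    - name: call the deployed mock endpoint (sketch)
      uri:
        url: "https://{{ create_result.api_id }}.execute-api.{{ ec2_region }}.amazonaws.com/minimal"
        status_code: 200
      register: uri_result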
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/defaults/main.yml
new file mode 100644
index 00000000..a36eb3de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for aws_codebuild
+
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+iam_role_name: "ansible-test-sts-{{ unique_id }}-codebuild-service-role"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json
new file mode 100644
index 00000000..3af7c641
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "codebuild.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/tasks/main.yml
new file mode 100644
index 00000000..2e4a622e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+# tasks file for aws_codebuild
+
+- name: Run aws_codebuild integration tests.
+
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ==================== preparations ========================================
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create IAM role needed for CodeBuild
+ iam_role:
+ name: "{{ iam_role_name }}"
+ description: Role with permissions for CodeBuild actions.
+ assume_role_policy_document: "{{ lookup('file', 'codebuild_iam_trust_policy.json') }}"
+ state: present
+ <<: *aws_connection_info
+ register: codebuild_iam_role
+
+ - name: Set variable with aws account id
+ set_fact:
+ aws_account_id: "{{ codebuild_iam_role.iam_role.arn.split(':')[4] }}"
+
+ # ================== integration test ==========================================
+
+ - name: create CodeBuild project
+ aws_codebuild:
+ name: "{{ resource_prefix }}-test-ansible-codebuild"
+ description: Build project for testing the Ansible aws_codebuild module
+ service_role: "{{ codebuild_iam_role.iam_role.arn }}"
+ timeout_in_minutes: 30
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespace_type: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: test
+ environment:
+ compute_type: BUILD_GENERAL1_SMALL
+ privileged_mode: true
+ image: 'aws/codebuild/docker:17.09.0'
+ type: LINUX_CONTAINER
+ environment_variables:
+ - { name: 'FOO_ENV', value: 'other' }
+ tags:
+ - { key: 'purpose', value: 'ansible-test' }
+ state: present
+ <<: *aws_connection_info
+ register: output
+ retries: 10
+ delay: 5
+ until: output is success
+
+ - assert:
+ that:
+ - "output.project.description == 'Build project for testing the Ansible aws_codebuild module'"
+
+ - name: idempotence check rerunning same CodeBuild task
+ aws_codebuild:
+ name: "{{ resource_prefix }}-test-ansible-codebuild"
+ description: Build project for testing the Ansible aws_codebuild module
+ service_role: "{{ codebuild_iam_role.iam_role.arn }}"
+ timeout_in_minutes: 30
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespace_type: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: test
+ encryption_key: 'arn:aws:kms:{{ aws_region }}:{{ aws_account_id }}:alias/aws/s3'
+ environment:
+ compute_type: BUILD_GENERAL1_SMALL
+ privileged_mode: true
+ image: 'aws/codebuild/docker:17.09.0'
+ type: LINUX_CONTAINER
+ environment_variables:
+ - { name: 'FOO_ENV', value: 'other' }
+ tags:
+ - { key: 'purpose', value: 'ansible-test' }
+ state: present
+ <<: *aws_connection_info
+ register: rerun_test_output
+
+ - assert:
+ that:
+ - "rerun_test_output.project.created == output.project.created"
+
+ - name: delete CodeBuild project
+ aws_codebuild:
+ name: "{{ output.project.name }}"
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts: {}
+ state: absent
+ <<: *aws_connection_info
+ async: 300
+
+ # ============================== cleanup ======================================
+
+ always:
+
+ - name: cleanup IAM role created for CodeBuild test
+ iam_role:
+ name: "{{ iam_role_name }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/vars/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codebuild/vars/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/tasks/main.yml
new file mode 100644
index 00000000..acf194e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codecommit/tasks/main.yml
@@ -0,0 +1,134 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ # ============================================================
+ - name: Create a repository (CHECK MODE)
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ description: original comment
+ state: present
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: Create a repository
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ description: original comment
+ state: present
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_description == 'original comment'
+
+ - name: No-op update to repository
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ description: original comment
+ state: present
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_description == 'original comment'
+
+ - name: Update repository description (CHECK MODE)
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ description: new comment
+ state: present
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_description == 'original comment'
+
+ - name: Update repository description
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ description: new comment
+ state: present
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_description == 'new comment'
+
+ # ============================================================
+ - name: Delete a repository (CHECK MODE)
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: absent
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: Delete a repository
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: absent
+ register: output
+ - assert:
+ that:
+ - output is changed
+
+ - name: Delete a non-existent repository
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: absent
+ register: output
+ - assert:
+ that:
+ - output is not changed
+
+ - name: Create a repository without description
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: present
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+
+ - name: No-op update to repository without description
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: present
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+
+ - name: Delete a repository without description
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: absent
+ register: output
+ - assert:
+ that:
+ - output is changed
+
+ always:
+ ###### TEARDOWN STARTS HERE ######
+ - name: Delete a repository
+ aws_codecommit:
+ name: "{{ resource_prefix }}_repo"
+ state: absent
+ ignore_errors: yes
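Editor's note: each mutating step above is exercised twice, first with check_mode: yes (asserting it reports changed without touching AWS), then for real. The shape of one such pair, with a hypothetical module name:

    - name: would create the resource (no API calls made)
      example.collection.resource:   # hypothetical module
        name: demo
        state: present
      check_mode: yes
      register: dry_run

    - assert:
        that:
          - dry_run is changed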
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/defaults/main.yml
new file mode 100644
index 00000000..5f735ba6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+# defaults file for aws_codepipeline
+
+codepipeline_name: "{{ resource_prefix }}-test-codepipeline"
+
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+codepipeline_service_role_name: "ansible-test-sts-{{ unique_id }}-codepipeline-role"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json
new file mode 100644
index 00000000..9be3f72b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "codepipeline.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/tasks/main.yml
new file mode 100644
index 00000000..f5fe7b41
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_codepipeline/tasks/main.yml
@@ -0,0 +1,156 @@
+---
+# tasks file for aws_codepipeline
+
+- name: Run aws_codepipeline module integration tests
+
+ block:
+
+ # ==================== preparations ========================================
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create IAM role needed for CodePipeline test
+ iam_role:
+ name: "{{ codepipeline_service_role_name }}"
+ description: Role with permissions for CodePipeline actions.
+ assume_role_policy_document: "{{ lookup('file', 'codepipeline_iam_trust_policy.json') }}"
+ state: present
+ <<: *aws_connection_info
+ register: codepipeline_iam_role
+
+ # ================== integration test ==========================================
+
+ - name: create CodePipeline
+ aws_codepipeline:
+ name: "{{ codepipeline_name }}"
+ role_arn: "{{ codepipeline_iam_role.iam_role.arn }}"
+ artifact_store:
+ type: S3
+ location: foo
+ stages:
+ - name: step_1
+ actions:
+ - name: action
+ actionTypeId:
+ category: Source
+ owner: AWS
+ provider: S3
+ version: '1'
+ configuration:
+ S3Bucket: foo
+ S3ObjectKey: bar
+ outputArtifacts:
+ - { name: step_one_output }
+ - name: step_2
+ actions:
+ - name: action
+ actionTypeId:
+ category: Build
+ owner: AWS
+ provider: CodeBuild
+ version: '1'
+ inputArtifacts:
+ - { name: step_one_output }
+ outputArtifacts:
+ - { name: step_two_output }
+ configuration:
+ ProjectName: foo
+ state: present
+ <<: *aws_connection_info
+ register: output
+ retries: 10
+ delay: 5
+ until: output is success
+
+ - assert:
+ that:
+ - output.changed == True
+ - output.pipeline.name == "{{ codepipeline_name }}"
+ - output.pipeline.stages|length > 1
+
+ - name: idempotence check rerunning same CodePipeline task
+ aws_codepipeline:
+ name: "{{ codepipeline_name }}"
+ role_arn: "{{ codepipeline_iam_role.iam_role.arn }}"
+ artifact_store:
+ type: S3
+ location: foo
+ stages:
+ - name: step_1
+ actions:
+ - name: action
+ actionTypeId:
+ category: Source
+ owner: AWS
+ provider: S3
+ version: '1'
+ configuration:
+ S3Bucket: foo
+ S3ObjectKey: bar
+ outputArtifacts:
+ - { name: step_one_output }
+ - name: step_2
+ actions:
+ - name: action
+ actionTypeId:
+ category: Build
+ owner: AWS
+ provider: CodeBuild
+ version: '1'
+ inputArtifacts:
+ - { name: step_one_output }
+ outputArtifacts:
+ - { name: step_two_output }
+ configuration:
+ ProjectName: foo
+ state: present
+ <<: *aws_connection_info
+ register: rerun_test_output
+
+ - assert:
+ that:
+ - rerun_test_output.changed == False
+ - rerun_test_output.pipeline == output.pipeline
+
+ - name: Test deletion of CodePipeline
+ aws_codepipeline:
+ name: "{{ codepipeline_name }}"
+ role_arn: ''
+ artifact_store: {}
+ stages: []
+ state: absent
+ <<: *aws_connection_info
+ register: absent_test_output
+
+ - assert:
+ that:
+ - absent_test_output.changed == True
+ - absent_test_output.pipeline is undefined
+
+ # ==================== cleanup =======================
+
+ always:
+
+ - name: Cleanup - delete test CodePipeline
+ aws_codepipeline:
+ name: "{{ codepipeline_name }}"
+ role_arn: ''
+ artifact_store: {}
+ stages: []
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: true
+
+ - name: Cleanup - delete IAM role needed for CodePipeline test
+ iam_role:
+ name: "{{ codepipeline_name }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: true
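Editor's note: the idempotence check above re-submits a byte-identical pipeline definition and asserts changed == False. The same two-run pattern applies to any state-declaring module; a generic sketch with a hypothetical module name:

    - name: identical re-run after a first successful run
      example.collection.resource:   # hypothetical module
        name: demo
        state: present
      register: second_run

    - assert:
        that:
          - second_run is not changed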
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/aliases
new file mode 100644
index 00000000..c598f3e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/aliases
@@ -0,0 +1,8 @@
+cloud/aws
+disabled
+shippable/aws/group2
+aws_config_aggregation_authorization
+aws_config_aggregator
+aws_config_delivery_channel
+aws_config_recorder
+aws_config_rule
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/defaults/main.yaml
new file mode 100644
index 00000000..da7b735d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/defaults/main.yaml
@@ -0,0 +1,4 @@
+---
+config_s3_bucket: '{{ resource_prefix }}-config-records'
+config_sns_name: '{{ resource_prefix }}-delivery-channel-test-topic'
+config_role_name: 'config-recorder-test-{{ resource_prefix }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/files/config-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/files/config-trust-policy.json
new file mode 100644
index 00000000..532b3ed5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/files/config-trust-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "config.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/tasks/main.yaml
new file mode 100644
index 00000000..34e3449f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/tasks/main.yaml
@@ -0,0 +1,405 @@
+---
+- block:
+
+ # ============================================================
+ # Prerequisites
+ # ============================================================
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: true
+
+ - name: ensure IAM role exists
+ iam_role:
+ <<: *aws_connection_info
+ name: '{{ config_role_name }}'
+ assume_role_policy_document: "{{ lookup('file','config-trust-policy.json') }}"
+ state: present
+ create_instance_profile: no
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/service-role/AWSConfigRole'
+ register: config_iam_role
+
+ - name: ensure SNS topic exists
+ sns_topic:
+ <<: *aws_connection_info
+ name: '{{ config_sns_name }}'
+ state: present
+ subscriptions:
+ - endpoint: "rando_email_address@rando.com"
+ protocol: "email"
+ register: config_sns_topic
+
+ - name: ensure S3 bucket exists
+ s3_bucket:
+ <<: *aws_connection_info
+ name: "{{ config_s3_bucket }}"
+
+ - name: ensure S3 access for IAM role
+ iam_policy:
+ <<: *aws_connection_info
+ iam_type: role
+ iam_name: '{{ config_role_name }}'
+ policy_name: AwsConfigRecorderTestRoleS3Policy
+ state: present
+ policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}"
+
+ # ============================================================
+ # Module requirement testing
+ # ============================================================
+ - name: test rule with no source parameter
+ aws_config_rule:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no source parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments:")'
+
+ - name: test resource_type delivery_channel with no s3_bucket parameter
+ aws_config_delivery_channel:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no s3_bucket parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments:")'
+
+ - name: test resource_type configuration_recorder with no role_arn parameter
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no role_arn parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("state is present but all of the following are missing")'
+
+ - name: test resource_type configuration_recorder with no recording_group parameter
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no recording_group parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("state is present but all of the following are missing")'
+
+ - name: test resource_type aggregation_authorization with no authorized_account_id parameter
+ aws_config_aggregation_authorization:
+ state: present
+ <<: *aws_connection_info
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no authorized_account_id parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments:")'
+
+ - name: test resource_type aggregation_authorization with no authorized_aws_region parameter
+ aws_config_aggregation_authorization:
+ <<: *aws_connection_info
+ state: present
+ authorized_account_id: '123456789012'
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no authorized_aws_region parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments:")'
+
+ - name: test resource_type configuration_aggregator with no account_sources parameter
+ aws_config_aggregator:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no account_sources parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments: account_sources")'
+
+ - name: test resource_type configuration_aggregator with no organization_source parameter
+ aws_config_aggregator:
+ <<: *aws_connection_info
+ name: random_name
+ state: present
+ account_sources: []
+ register: output
+ ignore_errors: true
+
+ - name: assert failure when called with no organization_source parameter
+ assert:
+ that:
+ - output.failed
+ - 'output.msg.startswith("missing required arguments: organization_source")'
+
+ # ============================================================
+ # Creation testing
+ # ============================================================
+ - name: Create Configuration Recorder for AWS Config
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: test_configuration_recorder
+ state: present
+ role_arn: "{{ config_iam_role.arn }}"
+ recording_group:
+ all_supported: true
+ include_global_types: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create Delivery Channel for AWS Config
+ aws_config_delivery_channel:
+ <<: *aws_connection_info
+ name: test_delivery_channel
+ state: present
+ s3_bucket: "{{ config_s3_bucket }}"
+ s3_prefix: "foo/bar"
+ sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
+ delivery_frequency: 'Twelve_Hours'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create Config Rule for AWS Config
+ aws_config_rule:
+ <<: *aws_connection_info
+ name: test_config_rule
+ state: present
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ # Update testing
+ # ============================================================
+ - name: Update Configuration Recorder
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: test_configuration_recorder
+ state: present
+ role_arn: "{{ config_iam_role.arn }}"
+ recording_group:
+ all_supported: false
+ include_global_types: false
+ resource_types:
+ - 'AWS::S3::Bucket'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Update Delivery Channel
+ aws_config_delivery_channel:
+ <<: *aws_connection_info
+ name: test_delivery_channel
+ state: present
+ s3_bucket: "{{ config_s3_bucket }}"
+ sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
+ delivery_frequency: 'TwentyFour_Hours'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Update Config Rule
+ aws_config_rule:
+ <<: *aws_connection_info
+ name: test_config_rule
+ state: present
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ # Read testing
+ # ============================================================
+ - name: Don't update Configuration Recorder
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: test_configuration_recorder
+ state: present
+ role_arn: "{{ config_iam_role.arn }}"
+ recording_group:
+ all_supported: false
+ include_global_types: false
+ resource_types:
+ - 'AWS::S3::Bucket'
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+
+ - name: Don't update Delivery Channel
+ aws_config_delivery_channel:
+ <<: *aws_connection_info
+ name: test_delivery_channel
+ state: present
+ s3_bucket: "{{ config_s3_bucket }}"
+ sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
+ delivery_frequency: 'TwentyFour_Hours'
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+
+ - name: Don't update Config Rule
+ aws_config_rule:
+ <<: *aws_connection_info
+ name: test_config_rule
+ state: present
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+
+ always:
+ # ============================================================
+ # Destroy testing
+ # ============================================================
+ - name: Destroy Configuration Recorder
+ aws_config_recorder:
+ <<: *aws_connection_info
+ name: test_configuration_recorder
+ state: absent
+ register: output
+ ignore_errors: yes
+
+# - assert:
+# that:
+# - output.changed
+
+ - name: Destroy Delivery Channel
+ aws_config_delivery_channel:
+ <<: *aws_connection_info
+ name: test_delivery_channel
+ state: absent
+ s3_bucket: "{{ config_s3_bucket }}"
+ sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
+ delivery_frequency: 'TwentyFour_Hours'
+ register: output
+ ignore_errors: yes
+
+# - assert:
+# that:
+# - output.changed
+
+ - name: Destroy Config Rule
+ aws_config_rule:
+ <<: *aws_connection_info
+ name: test_config_rule
+ state: absent
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
+ register: output
+ ignore_errors: yes
+
+# - assert:
+# that:
+# - output.changed
+
+ # ============================================================
+ # Clean up prerequisites
+ # ============================================================
+ - name: remove S3 access from IAM role
+ iam_policy:
+ <<: *aws_connection_info
+ iam_type: role
+ iam_name: '{{ config_role_name }}'
+ policy_name: AwsConfigRecorderTestRoleS3Policy
+ state: absent
+ policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}"
+ ignore_errors: yes
+
+ - name: remove IAM role
+ iam_role:
+ <<: *aws_connection_info
+ name: '{{ config_role_name }}'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove SNS topic
+ sns_topic:
+ <<: *aws_connection_info
+ name: '{{ config_sns_name }}'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove S3 bucket
+ s3_bucket:
+ <<: *aws_connection_info
+ name: "{{ config_s3_bucket }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/templates/config-s3-policy.json.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/templates/config-s3-policy.json.j2
new file mode 100644
index 00000000..53093300
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_config/templates/config-s3-policy.json.j2
@@ -0,0 +1,23 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": "sns:Publish",
+ "Resource": "{{ config_sns_topic.sns_arn }}",
+ "Effect": "Allow",
+ "Sid": "PublishToSNS"
+ },
+ {
+ "Action": "s3:PutObject",
+ "Resource": "arn:aws:s3:::{{ config_s3_bucket }}/*",
+ "Effect": "Allow",
+ "Sid": "AllowPutS3Object"
+ },
+ {
+ "Action": "s3:GetBucketAcl",
+ "Resource": "arn:aws:s3:::{{ config_s3_bucket }}",
+ "Effect": "Allow",
+ "Sid": "AllowGetS3Acl"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/defaults/main.yml
new file mode 100644
index 00000000..214f2496
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/defaults/main.yml
@@ -0,0 +1,33 @@
+eks_cluster_name: "{{ resource_prefix }}"
+eks_subnets:
+ - zone: a
+ cidr: 10.0.1.0/24
+ - zone: b
+ cidr: 10.0.2.0/24
+ - zone: c
+ cidr: 10.0.3.0/24
+
+eks_security_groups:
+ - name: "{{ eks_cluster_name }}-control-plane-sg"
+ description: "EKS Control Plane Security Group"
+ rules:
+ - group_name: "{{ eks_cluster_name }}-workers-sg"
+ group_desc: "EKS Worker Security Group"
+ ports: 443
+ proto: tcp
+ rules_egress:
+ - group_name: "{{ eks_cluster_name }}-workers-sg"
+ group_desc: "EKS Worker Security Group"
+ from_port: 1025
+ to_port: 65535
+ proto: tcp
+ - name: "{{ eks_cluster_name }}-workers-sg"
+ description: "EKS Worker Security Group"
+ rules:
+ - group_name: "{{ eks_cluster_name }}-workers-sg"
+ proto: tcp
+ from_port: 1
+ to_port: 65535
+ - group_name: "{{ eks_cluster_name }}-control-plane-sg"
+ ports: 10250
+ proto: tcp
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/files/eks-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/files/eks-trust-policy.json
new file mode 100644
index 00000000..85cfb59d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/files/eks-trust-policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "eks.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml
new file mode 100644
index 00000000..e4c4b31f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml
@@ -0,0 +1,12 @@
+- name: try to use the aws_eks_cluster module
+ aws_eks_cluster:
+ state: absent
+ name: my_cluster
+ ignore_errors: yes
+ register: aws_eks_cluster
+
+- name: ensure that aws_eks fails with friendly error message
+ assert:
+ that:
+ - '"msg" in aws_eks_cluster'
+ - aws_eks_cluster is failed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml
new file mode 100644
index 00000000..4feb7ab4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml
@@ -0,0 +1,13 @@
+- name: try using aws_eks_cluster wait with state=absent
+ aws_eks_cluster:
+ state: absent
+ name: my_cluster
+ wait: yes
+ ignore_errors: yes
+ register: aws_eks_cluster
+
+- name: ensure that aws_eks fails with friendly error message
+ assert:
+ that:
+ - '"msg" in aws_eks_cluster'
+ - aws_eks_cluster is failed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/full_test.yml
new file mode 100644
index 00000000..43b1c801
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/full_test.yml
@@ -0,0 +1,247 @@
+---
+# tasks file for aws_eks modules
+- name: aws_eks_cluster tests
+ collections:
+ - amazon.aws
+ block:
+ # If us-west-1 does become supported, change this test to use an unsupported region
+ # or if all regions are supported, delete this test
+ - name: attempt to use eks in unsupported region
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ state: absent
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: us-west-1
+ register: aws_eks_unsupported_region
+ ignore_errors: yes
+
+ - name: check that aws_eks_cluster did nothing
+ assert:
+ that:
+ - aws_eks_unsupported_region is failed
+ - '"msg" in aws_eks_unsupported_region'
+
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
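+    # The fact above doubles as a YAML anchor: later tasks merge the same
+    # credential block in with "<<: *aws_connection_info", so the credentials
+    # are defined once and no_log keeps them out of task output.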
+ - name: delete an as yet non-existent EKS cluster
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: aws_eks_delete_non_existent
+
+ - name: check that aws_eks_cluster did nothing
+ assert:
+ that:
+ - aws_eks_delete_non_existent is not changed
+
+ - name: ensure IAM instance role exists
+ iam_role:
+ name: aws_eks_cluster_role
+ assume_role_policy_document: "{{ lookup('file','eks-trust-policy.json') }}"
+ state: present
+ create_instance_profile: no
+ managed_policies:
+ - AmazonEKSServicePolicy
+ - AmazonEKSClusterPolicy
+ <<: *aws_connection_info
+ register: iam_role
+
+ - name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: 10.0.0.0/16
+ state: present
+ name: '{{ resource_prefix }}_aws_eks'
+ resource_tags:
+ Name: '{{ resource_prefix }}_aws_eks'
+ <<: *aws_connection_info
+ register: setup_vpc
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}{{ item.zone }}'
+ tags:
+ Name: '{{ resource_prefix }}_aws_eks-subnet-{{ item.zone }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: "{{ item.cidr }}"
+ state: present
+ <<: *aws_connection_info
+ register: setup_subnets
+ with_items:
+ - "{{ eks_subnets }}"
+
+ - name: create security groups to use for EKS
+ ec2_group:
+ name: "{{ item.name }}"
+ description: "{{ item.description }}"
+ state: present
+ rules: "{{ item.rules }}"
+ rules_egress: "{{ item.rules_egress|default(omit) }}"
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ with_items: "{{ eks_security_groups }}"
+ register: setup_security_groups
+
+ - name: create EKS cluster
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ security_groups: "{{ eks_security_groups | community.general.json_query('[].name') }}"
+ subnets: "{{ setup_subnets.results | community.general.json_query('[].subnet.id') }}"
+ role_arn: "{{ iam_role.arn }}"
+ <<: *aws_connection_info
+ register: eks_create
+
+ - name: check that EKS cluster was created
+ assert:
+ that:
+ - eks_create is changed
+ - eks_create.name == eks_cluster_name
+
+ - name: create EKS cluster with same details but wait for it to become active
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ security_groups: "{{ eks_security_groups | community.general.json_query('[].name') }}"
+ subnets: "{{ setup_subnets.results | community.general.json_query('[].subnet.id') }}"
+ role_arn: "{{ iam_role.arn }}"
+ wait: yes
+ <<: *aws_connection_info
+ register: eks_create
+
+ - name: Check that EKS cluster is active and has CA and endpoint data
+ assert:
+ that:
+ - eks_create is not changed
+ - eks_create.name == eks_cluster_name
+ - eks_create.status == "ACTIVE"
+ - eks_create.certificate_authority.data is defined
+ - eks_create.certificate_authority.data != ""
+ - eks_create.endpoint is defined
+ - eks_create.endpoint != ""
+
+ - name: create EKS cluster with same details but using SG ids
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ security_groups: "{{ setup_security_groups.results | community.general.json_query('[].group_id') }}"
+ subnets: "{{ setup_subnets.results | community.general.json_query('[].subnet.id') }}"
+ role_arn: "{{ iam_role.arn }}"
+ <<: *aws_connection_info
+ register: eks_create
+
+ - name: check that EKS cluster did not change
+ assert:
+ that:
+ - eks_create is not changed
+ - eks_create.name == eks_cluster_name
+
+ - name: remove EKS cluster, waiting until complete
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ register: eks_delete
+
+ - name: check that EKS cluster was removed
+ assert:
+ that:
+ - eks_delete is changed
+
+ - name: create EKS cluster with same details but wait for it to become active
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ security_groups: "{{ eks_security_groups | community.general.json_query('[].name') }}"
+ subnets: "{{ setup_subnets.results | community.general.json_query('[].subnet.id') }}"
+ role_arn: "{{ iam_role.arn }}"
+ wait: yes
+ <<: *aws_connection_info
+ register: eks_create
+
+ - name: check that EKS cluster was created
+ assert:
+ that:
+ - eks_create is changed
+ - eks_create.name == eks_cluster_name
+
+ - name: remove EKS cluster, without waiting this time
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: eks_delete
+
+ - name: check that EKS cluster remove has started
+ assert:
+ that:
+ - eks_delete is changed
+
+ always:
+ - name: Announce teardown start
+ debug:
+ msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
+
+ - name: remove EKS cluster
+ aws_eks_cluster:
+ name: "{{ eks_cluster_name }}"
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ register: eks_delete
+ ignore_errors: yes
+
+ - debug:
+ msg: "{{ eks_security_groups|reverse|list }}"
+
+ - name: create list of all additional EKS security groups
+ set_fact:
+ additional_eks_sg:
+ - name: "{{ eks_cluster_name }}-workers-sg"
+
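+    # The two security groups reference each other in their rules, so neither
+    # can be deleted as-is; purging the rules first breaks the cycle.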
+ - name: set all security group rule lists to empty to remove circular dependency
+ ec2_group:
+ name: "{{ item.name }}"
+ description: "{{ item.description }}"
+ state: present
+ rules: []
+ rules_egress: []
+ purge_rules: yes
+ purge_rules_egress: yes
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ with_items: "{{ eks_security_groups }}"
+ ignore_errors: yes
+
+ - name: remove security groups
+ ec2_group:
+ name: '{{ item.name }}'
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ with_items: "{{ eks_security_groups|reverse|list + additional_eks_sg }}"
+ ignore_errors: yes
+
+ - name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}{{ item.zone }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+      cidr: "{{ item.cidr }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items: "{{ eks_subnets }}"
+ ignore_errors: yes
+
+ - name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: 10.0.0.0/16
+ state: absent
+ name: '{{ resource_prefix }}_aws_eks'
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/main.yml
new file mode 100644
index 00000000..f26e11db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_eks_cluster/tasks/main.yml
@@ -0,0 +1,66 @@
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: virtualenv
+
+# Test graceful failure when botocore is too old (<1.10.1)
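+# Pattern for each pin below: install the pinned botocore into a throwaway
+# virtualenv, point ansible_python_interpreter at that virtualenv for the
+# included tasks, then delete it so the next pin starts clean.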
+
+- pip:
+ name:
+ - 'botocore<1.10.1'
+ - boto3
+ - coverage<5
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: botocore_lt_1.10.1.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+
+# Test graceful failures when botocore<1.12.38
+
+- pip:
+ name:
+ - 'botocore>1.10.1,<1.12.38'
+ - boto3
+ - coverage<5
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: botocore_lt_1.12.38.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+
+# Run the full test suite with a recent botocore
+
+- pip:
+ name:
+ - 'botocore>=1.10.1'
+ - boto3
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: full_test.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+ playbook_namespace: ansible-test-k8s-validate
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml
new file mode 100644
index 00000000..3f38e1a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for aws_elasticbeanstalk_app
+app_name: '{{ resource_prefix }}_eb_ansible_test'
+description: 'eb_ansible_test app description'
+alternate_description: 'eb_ansible_test app alternate_description'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml
new file mode 100644
index 00000000..56f77d3d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml
@@ -0,0 +1,156 @@
+---
+# tasks file for aws_elasticbeanstalk_app
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ # ============================================================
+ - name: test with no parameters
+ aws_elasticbeanstalk_app:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+
+ # ============================================================
+ - name: test create app
+ aws_elasticbeanstalk_app:
+ app_name: "{{ app_name }}"
+ description: "{{ description }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+ # ============================================================
+ - name: test create when app already exists
+ aws_elasticbeanstalk_app:
+ app_name: "{{ app_name }}"
+ description: "{{ description }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert changed is False since the app already exists
+ assert:
+ that:
+ - result.changed == False
+
+ # ============================================================
+ - name: make an update to an existing app
+ aws_elasticbeanstalk_app:
+ app_name: "{{ app_name }}"
+ description: "{{ alternate_description }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+# # ============================================================
+# - name: fail deleting an app that has environments that exist
+# aws_elasticbeanstalk_app:
+# app_name: "non_app"
+# state: absent
+# <<: *aws_connection_info
+# register: result
+# ignore_errors: true
+#
+# - name: assert deleting an app with running environments fails
+# assert:
+# that:
+# - result.changed == False
+
+# # ============================================================
+# - name: deleting an app that has environments that exist with terminate_by_force True
+# aws_elasticbeanstalk_app:
+# app_name: "non_app"
+# state: absent
+# terminate_by_force: True
+# <<: *aws_connection_info
+# register: result
+#
+# - name: assert deleting an app with running environments with terminate_by_force True
+# assert:
+# that:
+# - result.changed == True
+#
+ # ============================================================
+# - name: retrieve a list of apps
+# aws_elasticbeanstalk_app_facts:
+# <<: *aws_connection_info
+# register: result
+
+# - name: assert changed is True
+# assert:
+# that:
+# - result is success
+
+# # ============================================================
+# - name: deleting an app that has environments that exist with terminate_by_force True
+# aws_elasticbeanstalk_app:
+# app_name: "non_app"
+# state: absent
+# terminate_by_force: True
+# <<: *aws_connection_info
+# register: result
+#
+# - name: assert deleting an app with running environments with terminate_by_force True
+# assert:
+# that:
+# - result.changed == True
+#
+ # ============================================================
+  - name: delete a non-existent app
+ aws_elasticbeanstalk_app:
+ app_name: "non_app"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: true
+
+  - name: assert deleting a non-existent app fails
+ assert:
+ that:
+ - result.changed == False
+ - 'result.output.startswith("Application not found")'
+
+ # ============================================================
+ - name: delete existing app
+ aws_elasticbeanstalk_app:
+ app_name: "{{ app_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+ # ============================================================
+
+ always:
+
+ - name: delete existing app
+ aws_elasticbeanstalk_app:
+ app_name: "{{ app_name }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/tasks/main.yml
new file mode 100644
index 00000000..84ca6e5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_glue_connection/tasks/main.yml
@@ -0,0 +1,87 @@
+- block:
+
+ # TODO: description, match_criteria, security_groups, and subnet_id are unused module options
+
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create glue connection
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ connection_properties:
+ JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}"
+ USERNAME: my-username
+ PASSWORD: my-password
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: test idempotence creating glue connection
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ connection_properties:
+ JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}"
+ USERNAME: my-username
+ PASSWORD: my-password
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: test updating JDBC connection url
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ connection_properties:
+ JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}-updated"
+ USERNAME: my-username
+ PASSWORD: my-password
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: delete glue connection
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: test idempotence removing glue connection
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+
+ - name: delete glue connection
+ aws_glue_connection:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/defaults/main.yml
new file mode 100644
index 00000000..8777873f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+aws_inspector_scan_name: "aws_inspector_scan-{{ ansible_date_time.epoch }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/tasks/main.yml
new file mode 100644
index 00000000..36a3cfca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_inspector_target/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+
+- name: Set Connection Information for All Tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+- block:
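+  # The tag literally named "changed" flips from "no" to "yes" below purely
+  # to exercise tag updates; it is test data, not a task keyword.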
+ - name: Create AWS Inspector Target Group
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: present
+ tags:
+ Name: "{{ aws_inspector_scan_name }}"
+ changed: "no"
+ <<: *aws_connection_info
+ register: target_group_create
+
+ - name: Create AWS Inspector Target Group (Verify)
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: present
+ tags:
+ Name: "{{ aws_inspector_scan_name }}"
+ changed: "no"
+ <<: *aws_connection_info
+ register: target_group_create_verify
+
+ - name: Assert Successful AWS Inspector Target Group Creation
+ assert:
+ that:
+ - target_group_create is changed
+ - target_group_create.name == aws_inspector_scan_name
+ - target_group_create.tags.Name == aws_inspector_scan_name
+ - target_group_create.tags.changed == "no"
+ - target_group_create_verify is not changed
+ - target_group_create_verify.name == aws_inspector_scan_name
+ - target_group_create_verify.tags.Name == aws_inspector_scan_name
+ - target_group_create_verify.tags.changed == "no"
+
+ - name: Change AWS Inspector Target Group Tags
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: present
+ tags:
+ Name: "{{ aws_inspector_scan_name }}"
+ changed: "yes"
+ <<: *aws_connection_info
+ register: target_group_tag_change
+
+ - name: Change AWS Inspector Target Group Tags (Verify)
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: present
+ tags:
+ Name: "{{ aws_inspector_scan_name }}"
+ changed: "yes"
+ <<: *aws_connection_info
+ register: target_group_tag_change_verify
+
+ - name: Assert Successful AWS Inspector Target Group Tag Change
+ assert:
+ that:
+ - target_group_tag_change is changed
+ - target_group_tag_change.name == aws_inspector_scan_name
+ - target_group_tag_change.tags.Name == aws_inspector_scan_name
+ - target_group_tag_change.tags.changed == "yes"
+ - target_group_tag_change_verify is not changed
+ - target_group_tag_change_verify.name == aws_inspector_scan_name
+ - target_group_tag_change_verify.tags.Name == aws_inspector_scan_name
+ - target_group_tag_change_verify.tags.changed == "yes"
+
+ always:
+ - name: Delete AWS Inspector Target Group
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: target_group_delete
+
+ - name: Delete AWS Inspector Target Group (Verify)
+ aws_inspector_target:
+ name: "{{ aws_inspector_scan_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: target_group_delete_verify
+
+ - name: Assert Successful AWS Inspector Target Group Deletion
+ assert:
+ that:
+ - target_group_delete is changed
+ - target_group_delete_verify is not changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/aliases
new file mode 100644
index 00000000..7ed9252c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+aws_kms_info
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/tasks/main.yml
new file mode 100644
index 00000000..a27475c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/tasks/main.yml
@@ -0,0 +1,424 @@
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+
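+  # module_defaults applies these connection arguments to every AWS module
+  # called inside the block, so individual tasks can omit the region and
+  # credential parameters.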
+ block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+
+  # IAM role creation returns before the role is fully instantiated; create it here
+ # to ensure it exists when we need it for updating the policies
+ - name: create an IAM role that can do nothing
+ iam_role:
+ name: "{{ resource_prefix }}-kms-role"
+ state: present
+ assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action": "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": "Deny"} }'
+ register: iam_role_result
+ # ============================================================
+ # TESTS
+ - name: See whether key exists and its current state
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms"
+
+ - name: create a key in check mode
+ check_mode: yes
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms-check"
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ register: create_kms_check
+
+ - name: find facts about the check mode key
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms-check"
+ register: check_key
+
+ - name: ensure that check mode worked as expected
+ assert:
+ that:
+ - check_key["keys"]|length == 0
+ - create_kms_check is changed
+
+ - name: create a key
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ register: create_kms
+
+ - name: assert that state is enabled
+ assert:
+ that:
+ - create_kms.key_state == "Enabled"
+ - create_kms.tags['Hello'] == 'World'
+ - create_kms.enable_key_rotation == false
+
+ - name: enable key rotation
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: yes
+ register: create_kms
+
+ - name: assert that key rotation is enabled
+ assert:
+ that:
+ - create_kms.key_state == "Enabled"
+ - create_kms.tags['Hello'] == 'World'
+ - create_kms.enable_key_rotation == true
+
+ - name: delete the key in check mode
+ check_mode: yes
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: absent
+ register: delete_kms_check
+
+ - assert:
+ that:
+ - delete_kms_check is changed
+
+ - name: find facts about the key
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms"
+ register: new_key
+
+ - name: check that a key was found
+ assert:
+ that:
+ - new_key["keys"]|length == 1
+ - new_key["keys"][0]["enable_key_rotation"] == true
+      - new_key["keys"][0]["key_state"] != "PendingDeletion"
+
+  - name: Update policy on key to match the AWS Console-generated policy
+ aws_kms:
+ key_id: '{{ new_key["keys"][0]["key_id"] }}'
+ policy: "{{ lookup('template', 'console-policy.j2') | to_json }}"
+ register: kms_policy_changed
+
+ - name: Policy should have been changed
+ assert:
+ that:
+ - kms_policy_changed is changed
+
+ - name: Attempt to re-assert the same policy
+ aws_kms:
+ alias: "alias/{{ resource_prefix }}-kms"
+ policy: "{{ lookup('template', 'console-policy.j2') | to_json }}"
+ register: kms_policy_changed
+
+ - name: Policy should not have changed since it was last set
+ assert:
+ that:
+ - kms_policy_changed is succeeded
+
+ - name: grant user-style access to production secrets
+ aws_kms:
+ mode: grant
+ alias: "alias/{{ resource_prefix }}-kms"
+ role_name: "{{ resource_prefix }}-kms-role"
+ grant_types: "role,role grant"
+
+ - name: find facts about the key
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms"
+ register: new_key
+
+ - name: remove access to production secrets from role
+ aws_kms:
+ mode: deny
+ alias: "alias/{{ resource_prefix }}-kms"
+ role_arn: "{{ iam_role_result.iam_role.arn }}"
+
+ - name: find facts about the key
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms"
+ register: new_key
+
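+  # A grant both limits the allowed operations (Decrypt/RetireGrant) and,
+  # via encryption_context_equals, only applies when callers supply exactly
+  # this encryption context.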
+ - name: Allow the IAM role to use a specific Encryption Context
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ purge_grants: yes
+ purge_tags: yes
+ grants:
+ - name: test_grant
+ grantee_principal: "{{ iam_role_result.iam_role.arn }}"
+ retiring_principal: "{{ aws_caller_info.arn }}"
+ constraints:
+ encryption_context_equals:
+ environment: test
+ application: testapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: grant_one
+
+ - name: assert grant added
+ assert:
+ that:
+ - grant_one.changed
+ - grant_one.grants|length == 1
+
+ - name: Add a second grant
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ grants:
+ - name: another_grant
+ grantee_principal: "{{ iam_role_result.iam_role.arn }}"
+ retiring_principal: "{{ aws_caller_info.arn }}"
+ constraints:
+ encryption_context_equals:
+ Environment: second
+ Application: anotherapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: grant_two
+
+ - name: assert grant added
+ assert:
+ that:
+ - grant_two.changed
+ - grant_two.grants|length == 2
+
+ - name: Add a second grant again
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ grants:
+ - name: another_grant
+ grantee_principal: "{{ iam_role_result.iam_role.arn }}"
+ retiring_principal: "{{ aws_caller_info.arn }}"
+ constraints:
+ encryption_context_equals:
+ Environment: second
+ Application: anotherapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: grant_two_again
+
+ - name: assert grant added
+ assert:
+ that:
+ - not grant_two_again.changed
+ - grant_two_again.grants|length == 2
+
+ - name: Update the grants with purge_grants set
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ purge_grants: yes
+ grants:
+ - name: third_grant
+ grantee_principal: "{{ iam_role_result.iam_role.arn }}"
+ retiring_principal: "{{ aws_caller_info.arn }}"
+ constraints:
+ encryption_context_equals:
+ environment: third
+ application: onemoreapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: grant_three
+
+ - name: assert grants replaced
+ assert:
+ that:
+ - grant_three.changed
+ - grant_three.grants|length == 1
+
+  - name: update third grant to use encryption_context_subset instead of equals
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ grants:
+ - name: third_grant
+ grantee_principal: "{{ iam_role_result.iam_role.arn }}"
+ retiring_principal: "{{ aws_caller_info.arn }}"
+ constraints:
+ encryption_context_subset:
+ environment: third
+ application: onemoreapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: grant_three_update
+
+ - name: assert grants replaced
+ assert:
+ that:
+ - "grant_three_update.changed"
+ - "grant_three_update.grants|length == 1"
+ - "'encryption_context_equals' not in grant_three_update.grants[0].constraints"
+ - "'encryption_context_subset' in grant_three_update.grants[0].constraints"
+
+ - name: tag encryption key
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ tags:
+ tag_one: tag_one
+ tag_two: tag_two
+ register: tag_kms
+
+ - name: assert tags added and grants remain in place
+ assert:
+ that:
+ - "tag_kms.changed"
+ - "tag_kms.grants|length == 1"
+ - "'tag_one' in tag_kms.tags"
+ - "'tag_two' in tag_kms.tags"
+
+ - name: add, replace, remove tags
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ purge_tags: yes
+ tags:
+ tag_two: tag_two_updated
+ tag_three: tag_three
+ register: tag_kms_update
+
+ - name: assert tags correctly changed
+ assert:
+ that:
+ - "tag_kms_update.changed"
+ - "'tag_one' not in tag_kms_update.tags"
+ - "'tag_two' in tag_kms_update.tags"
+ - "tag_kms_update.tags.tag_two == 'tag_two_updated'"
+ - "'tag_three' in tag_kms_update.tags"
+
+ - name: make no real tag change
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ register: tag_kms_no_update
+
+ - name: assert no change to tags
+ assert:
+ that:
+ - "not tag_kms_no_update.changed"
+ - "'tag_one' not in tag_kms_no_update.tags"
+ - "'tag_two' in tag_kms_no_update.tags"
+ - "tag_kms_no_update.tags.tag_two == 'tag_two_updated'"
+ - "'tag_three' in tag_kms_no_update.tags"
+
+ - name: update the key's description and disable it
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ description: test key for testing
+ enabled: no
+ register: update_key
+
+  - name: assert that the description is updated and the key is disabled
+ assert:
+ that:
+ - update_key.description == "test key for testing"
+ - update_key.key_state == "Disabled"
+ - update_key.changed
+
+ - name: update policy to remove access to key rotation status
+ aws_kms:
+ alias: "alias/{{ resource_prefix }}-kms"
+ policy: "{{ lookup('template', 'console-policy-no-key-rotation.j2') | to_json }}"
+
+ - name: find facts about the key without key rotation status
+ aws_kms_info:
+ filters:
+ alias: "{{ resource_prefix }}-kms"
+ register: update_key
+
+ - name: assert that key rotation status is set to None
+ assert:
+ that:
+      - update_key["keys"][0]["enable_key_rotation"] | default(none) is none
+
+ - name: delete the key
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: absent
+ register: delete_kms
+
+ - name: assert that state is pending deletion
+ assert:
+ that:
+ - delete_kms.key_state == "PendingDeletion"
+ - delete_kms.changed
+
+ - name: re-delete the key
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: absent
+ register: delete_kms
+
+ - name: assert that state is pending deletion
+ assert:
+ that:
+ - delete_kms.key_state == "PendingDeletion"
+ - delete_kms is not changed
+
+ - name: undelete and enable the key
+ aws_kms:
+ alias: "{{ resource_prefix }}-kms"
+ state: present
+ enabled: yes
+ register: undelete_kms
+
+ - name: assert that state is enabled
+ assert:
+ that:
+ - undelete_kms.key_state == "Enabled"
+ - undelete_kms.changed
+
+  - name: delete a non-existent key
+ aws_kms:
+ key_id: '00000000-0000-0000-0000-000000000000'
+ state: absent
+ register: delete_kms
+
+ - name: assert that state is unchanged
+ assert:
+ that:
+ - delete_kms is not changed
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting key
+ aws_kms:
+ state: absent
+ alias: "{{ resource_prefix }}-kms"
+ register: destroy_result
+
+ - name: remove the IAM role
+ iam_role:
+ name: "{{ resource_prefix }}-kms-role"
+ state: absent
+ register: iam_role_result
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy-no-key-rotation.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy-no-key-rotation.j2
new file mode 100644
index 00000000..0e019d20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy-no-key-rotation.j2
@@ -0,0 +1,81 @@
+{
+ "Id": "key-consolepolicy-3",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Enable IAM User Permissions",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root"
+ },
+ "Action": "kms:*",
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow access for Key Administrators",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Create*",
+ "kms:Describe*",
+ "kms:Enable*",
+ "kms:List*",
+ "kms:Put*",
+ "kms:Update*",
+ "kms:Revoke*",
+ "kms:Disable*",
+ "kms:Get*",
+ "kms:Delete*",
+ "kms:TagResource",
+ "kms:UntagResource",
+ "kms:ScheduleKeyDeletion",
+ "kms:CancelKeyDeletion"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow use of the key",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow attachment of persistent resources",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:CreateGrant",
+ "kms:ListGrants",
+ "kms:RevokeGrant"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Bool": {
+ "kms:GrantIsForAWSResource": "true"
+ }
+ }
+ },
+ {
+ "Sid": "Disable access to key rotation status",
+ "Effect": "Deny",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": "kms:GetKeyRotationStatus",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy.j2
new file mode 100644
index 00000000..4b60ba58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_kms/templates/console-policy.j2
@@ -0,0 +1,72 @@
+{
+ "Id": "key-consolepolicy-3",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Enable IAM User Permissions",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root"
+ },
+ "Action": "kms:*",
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow access for Key Administrators",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Create*",
+ "kms:Describe*",
+ "kms:Enable*",
+ "kms:List*",
+ "kms:Put*",
+ "kms:Update*",
+ "kms:Revoke*",
+ "kms:Disable*",
+ "kms:Get*",
+ "kms:Delete*",
+ "kms:TagResource",
+ "kms:UntagResource",
+ "kms:ScheduleKeyDeletion",
+ "kms:CancelKeyDeletion"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow use of the key",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow attachment of persistent resources",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:CreateGrant",
+ "kms:ListGrants",
+ "kms:RevokeGrant"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Bool": {
+ "kms:GrantIsForAWSResource": "true"
+ }
+ }
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml
new file mode 100644
index 00000000..abffda91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/main.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+ - include_tasks: 'tasks/tests.yml'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml
new file mode 100644
index 00000000..3edbbade
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_region_info/tasks/main.yml
@@ -0,0 +1,107 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+ - name: 'List available Regions'
+ aws_region_info:
+ register: regions
+
+ - name: check task return attributes
+ vars:
+ first_region: '{{ regions.regions[0] }}'
+ assert:
+ that:
+ - regions is successful
+ - regions is not changed
+ - '"regions" in regions'
+ - '"endpoint" in first_region'
+ - '"opt_in_status" in first_region'
+ - '"region_name" in first_region'
+
+  - name: 'List available Regions - check_mode'
+    aws_region_info:
+    check_mode: yes
+    register: check_regions
+
+ - name: check task return attributes - check_mode
+ vars:
+ first_region: '{{ check_regions.regions[0] }}'
+ assert:
+ that:
+ - check_regions is successful
+ - check_regions is not changed
+ - '"regions" in check_regions'
+ - '"endpoint" in first_region'
+ - '"opt_in_status" in first_region'
+ - '"region_name" in first_region'
+
+ - name: 'Filter available Regions using - ("region-name")'
+ aws_region_info:
+ filters:
+ region-name: 'us-west-1'
+ register: us_west_1
+
+ - name: check task return attributes - filtering using -
+ vars:
+ first_region: '{{ us_west_1.regions[0] }}'
+ assert:
+ that:
+ - us_west_1 is successful
+ - us_west_1 is not changed
+ - '"regions" in us_west_1'
+ - us_west_1.regions | length == 1
+ - '"endpoint" in first_region'
+ - first_region.endpoint == 'ec2.us-west-1.amazonaws.com'
+ - '"opt_in_status" in first_region'
+ - first_region.opt_in_status == 'opt-in-not-required'
+ - '"region_name" in first_region'
+ - first_region.region_name == 'us-west-1'
+
+ - name: 'Filter available Regions using _ ("region_name")'
+ aws_region_info:
+ filters:
+ region_name: 'us-west-2'
+ register: us_west_2
+
+ - name: check task return attributes - filtering using _
+ vars:
+ first_region: '{{ us_west_2.regions[0] }}'
+ assert:
+ that:
+ - us_west_2 is successful
+ - us_west_2 is not changed
+ - '"regions" in us_west_2'
+ - us_west_2.regions | length == 1
+ - '"endpoint" in first_region'
+ - first_region.endpoint == 'ec2.us-west-2.amazonaws.com'
+ - '"opt_in_status" in first_region'
+ - first_region.opt_in_status == 'opt-in-not-required'
+ - '"region_name" in first_region'
+ - first_region.region_name == 'us-west-2'
+
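+  # When both spellings of the same filter key are supplied, the underscore
+  # form is expected to win, as the next task verifies (eu-central-1 rather
+  # than eu-west-1).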
+ - name: 'Filter available Regions using _ and - to check precedence'
+ aws_region_info:
+ filters:
+ region-name: 'eu-west-1'
+ region_name: 'eu-central-1'
+    register: regions_precedence
+
+ - name: check task return attributes - precedence
+ vars:
+      first_region: '{{ regions_precedence.regions[0] }}'
+ assert:
+ that:
+      - regions_precedence is successful
+      - regions_precedence is not changed
+      - '"regions" in regions_precedence'
+      - regions_precedence.regions | length == 1
+ - '"endpoint" in first_region'
+ - first_region.endpoint == 'ec2.eu-central-1.amazonaws.com'
+ - '"opt_in_status" in first_region'
+ - first_region.opt_in_status == 'opt-in-not-required'
+ - '"region_name" in first_region'
+ - first_region.region_name == 'eu-central-1'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/defaults/main.yaml
new file mode 100644
index 00000000..cfdab552
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+super_secret_string: 'Test12345'
+secret_manager_role: "{{ resource_prefix }}-secrets-manager"
+secret_name: "{{ resource_prefix }}-test-secret-string"
+lambda_name: "{{ resource_prefix }}-hello-world"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/hello_world.zip b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/hello_world.zip
new file mode 100644
index 00000000..8fd9e058
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/hello_world.zip
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/secretsmanager-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/secretsmanager-trust-policy.json
new file mode 100644
index 00000000..c53e3096
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/files/secretsmanager-trust-policy.json
@@ -0,0 +1,19 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ },
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "secretsmanager.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/tasks/main.yaml
new file mode 100644
index 00000000..483be475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_secret/tasks/main.yaml
@@ -0,0 +1,253 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
+ block:
+ - name: retrieve caller facts
+ aws_caller_info:
+ register: test_caller_facts
+
+ - name: ensure IAM role exists
+ iam_role:
+ name: "{{ secret_manager_role }}"
+ assume_role_policy_document: "{{ lookup('file','secretsmanager-trust-policy.json') }}"
+ state: present
+ create_instance_profile: no
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/SecretsManagerReadWrite'
+ register: iam_role
+ ignore_errors: yes
+
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+
+  # CI does not remove the role, and comparing policies is buggy on Python 3; fall back to iam_role_info
+ - name: get IAM role
+ iam_role_info:
+ name: "{{ secret_manager_role }}"
+ register: iam_role_info
+
+ - name: set iam_role_output
+ set_fact:
+ iam_role_output: "{{ iam_role_info.iam_roles[0] }}"
+ when: iam_role_info is defined
+
+ - name: create a temporary directory
+ tempfile:
+ state: directory
+ register: tmp
+
+ - name: move lambda into place for upload
+ copy:
+ src: "files/hello_world.zip"
+ dest: "{{ tmp.path }}/hello_world.zip"
+
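+    # The new IAM role may still be propagating, so the Lambda create below retries until it succeeds.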
+ - name: dummy lambda for testing
+ lambda:
+ name: "{{ lambda_name }}"
+ state: present
+ zip_file: "{{ tmp.path }}/hello_world.zip"
+ runtime: 'python2.7'
+ role: "{{ iam_role_output.arn }}"
+ handler: 'hello_world.lambda_handler'
+ register: lambda_output
+ until: not lambda_output.failed
+ retries: 10
+ delay: 5
+
+ - debug:
+ var: lambda_output
+
+ # ============================================================
+ # Module parameter testing
+ # ============================================================
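+    # With no parameters the module should fail argument-spec validation, even in check mode.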
+ - name: test with no parameters
+ aws_secret:
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments:")'
+
+ # ============================================================
+ # Creation/Deletion testing
+ # ============================================================
+ - name: add secret to AWS Secrets Manager
+ aws_secret:
+ name: "{{ secret_name }}"
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+ - result.arn is not none
+ - result.name is not none
+ - result.tags is not none
+ - result.version_ids_to_stages is not none
+
+ - name: no changes to secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - not result.changed
+ - result.arn is not none
+
+ - name: make change to secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ description: 'this is a change to this secret'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ register: result
+
+ - debug:
+ var: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+ - result.arn is not none
+ - result.name is not none
+ - result.tags is not none
+ - result.version_ids_to_stages is not none
+
+ - name: add tags to secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ description: 'this is a change to this secret'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ tags:
+ Foo: 'Bar'
+ Test: 'Tag'
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+
+ - name: remove tags from secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ description: 'this is a change to this secret'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+
+ - name: lambda policy for secrets manager
+ lambda_policy:
+ state: present
+ function_name: "{{ lambda_name }}"
+ statement_id: LambdaSecretsManagerTestPolicy
+ action: 'lambda:InvokeFunction'
+ principal: "secretsmanager.amazonaws.com"
+
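+    # The Lambda permission above may take time to propagate, so attaching the rotation lambda below retries until Secrets Manager can invoke the function.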
+ - name: add rotation lambda to secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ description: 'this is a change to this secret'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ rotation_lambda: "arn:aws:lambda:{{ aws_region }}:{{ test_caller_facts.account }}:function:{{ lambda_name }}"
+ register: result
+ retries: 100
+ delay: 5
+ until: not result.failed
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+
+ - name: remove rotation lambda from secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ description: 'this is a change to this secret'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.changed
+
+ always:
+ - name: remove secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ state: absent
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ recovery_window: 0
+ ignore_errors: yes
+
+ - name: remove lambda policy
+ lambda_policy:
+ state: absent
+ function_name: "{{ lambda_name }}"
+        statement_id: LambdaSecretsManagerTestPolicy
+ action: lambda:InvokeFunction
+ principal: secretsmanager.amazonaws.com
+ ignore_errors: yes
+
+ - name: remove dummy lambda
+ lambda:
+ name: "{{ lambda_name }}"
+ state: absent
+ zip_file: "{{ tmp.path }}/hello_world.zip"
+ runtime: 'python2.7'
+ role: "{{ secret_manager_role }}"
+ handler: 'hello_world.lambda_handler'
+ ignore_errors: yes
+
+ # CI does not remove the IAM role
+ - name: remove IAM role
+ iam_role:
+ name: "{{ secret_manager_role }}"
+ assume_role_policy_document: "{{ lookup('file','secretsmanager-trust-policy.json') }}"
+ state: absent
+ create_instance_profile: no
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/SecretsManagerReadWrite'
+ ignore_errors: yes
+
+ - name: remove temporary dir
+ file:
+ path: "{{ tmp.path }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/aliases
new file mode 100644
index 00000000..157ce0c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group3
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/defaults/main.yaml
new file mode 100644
index 00000000..f36d0179
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/defaults/main.yaml
@@ -0,0 +1,4 @@
+---
+email_identity: "{{ resource_prefix }}@example.com"
+domain_identity: "{{ resource_prefix }}.example.com"
+notification_queue_name: "{{ resource_prefix }}-notification-queue"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/meta/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/meta/main.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/meta/main.yaml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml
new file mode 100644
index 00000000..0f74d2f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml
@@ -0,0 +1,23 @@
+- name: assert returned identity
+ assert:
+ that:
+ - result.identity == identity
+- name: assert returned identity_arn
+ assert:
+ that:
+ - "result.identity_arn|regex_search('^arn:aws:ses:' + ec2_region + ':[0-9]*:identity/' + identity + '$')"
+    msg: "'{{ result.identity_arn }}' doesn't match regex '^arn:aws:ses:{{ ec2_region }}:[0-9]*:identity/{{ identity }}$'"
+- name: assert verification_attributes.verification_status == 'Pending'
+ assert:
+ that:
+ - result.verification_attributes.verification_status == 'Pending'
+- name: assert notification defaults
+ assert:
+ that:
+ - result.notification_attributes.forwarding_enabled == True
+ - result.notification_attributes.headers_in_bounce_notifications_enabled == False
+ - result.notification_attributes.headers_in_complaint_notifications_enabled == False
+ - result.notification_attributes.headers_in_delivery_notifications_enabled == False
+ - "'bounce_topic' not in result.notification_attributes"
+ - "'complaint_topic' not in result.notification_attributes"
+ - "'delivery_topic' not in result.notification_attributes"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/main.yaml
new file mode 100644
index 00000000..1be6c954
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity/tasks/main.yaml
@@ -0,0 +1,649 @@
+---
+# ============================================================
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
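+# The aws_connection_info anchor above is merged into each task below via the YAML merge key (<<: *aws_connection_info).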
+- name: test register email identity
+ block:
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ email_identity }}"
+ always:
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test register domain identity
+ block:
+ - name: register domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ domain_identity }}"
+ - name: assert verification_attributes.verification_token is defined
+ assert:
+ that:
+ - result.verification_attributes.verification_token
+ always:
+ - name: cleanup domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test email_identity unchanged when already existing
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ - name: duplicate register identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ email_identity }}"
+ always:
+ - name: cleanup identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test domain_identity unchanged when already existing
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ - name: duplicate register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ domain_identity }}"
+ always:
+ - name: cleanup identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+# Test for https://github.com/ansible/ansible/issues/51531
+# Because the AWS region is used explicitly rather than just to
+# obtain a connection, make sure this still works when the
+# region comes from the environment rather than a parameter.
+- name: test register identity without explicit region
+ block:
+ - name: register email identity without explicit region
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ region: "{{ omit }}"
+ register: result
+ environment:
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ email_identity }}"
+ always:
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test register email identity check mode
+ block:
+ - name: register email identity check mode
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ email_identity }}"
+
+ always:
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert nothing to clean up since check mode
+ assert:
+ that:
+ - result.changed == False
+# ============================================================
+- name: test register domain identity check mode
+ block:
+ - name: register domain identity check mode
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+ - import_tasks: assert_defaults.yaml
+ vars:
+ identity: "{{ domain_identity }}"
+
+ always:
+ - name: cleanup domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert nothing to clean up since check mode
+ assert:
+ that:
+ - result.changed == False
+# ============================================================
+- name: remove non-existent email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+- name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+# ============================================================
+- name: remove non-existent domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+- name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+# ============================================================
+- name: test remove email identity check mode
+ block:
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: remove email identity check mode
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ always:
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+    - name: assert something to clean up since removal was in check mode
+ assert:
+ that:
+ - result.changed == True
+# ============================================================
+- name: test remove domain identity check mode
+ block:
+ - name: register domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: remove domain identity check mode
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ always:
+ - name: cleanup domain identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+    - name: assert something to clean up since removal was in check mode
+ assert:
+ that:
+ - result.changed == True
+# ============================================================
+- name: test set notification queues
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ topic: "{{ topic_info.results[0].sns_arn }}"
+ complaint_notifications:
+ topic: "{{ topic_info.results[1].sns_arn }}"
+ delivery_notifications:
+ topic: "{{ topic_info.results[2].sns_arn }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert notification settings
+ assert:
+ that:
+ - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
+ - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
+ - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
+ - name: assert notification headers unchanged
+ assert:
+ that:
+ - result.notification_attributes.headers_in_bounce_notifications_enabled == False
+ - result.notification_attributes.headers_in_complaint_notifications_enabled == False
+ - result.notification_attributes.headers_in_delivery_notifications_enabled == False
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test change notification queues after create
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ - name: set notification topics
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ topic: "{{ topic_info.results[0].sns_arn }}"
+ complaint_notifications:
+ topic: "{{ topic_info.results[1].sns_arn }}"
+ delivery_notifications:
+ topic: "{{ topic_info.results[2].sns_arn }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ - name: assert notification settings
+ assert:
+ that:
+ - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
+ - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
+ - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test change notification settings check mode
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: set notification settings check mode
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ topic: "{{ topic_info.results[0].sns_arn }}"
+ include_headers: Yes
+ complaint_notifications:
+ topic: "{{ topic_info.results[1].sns_arn }}"
+ include_headers: Yes
+ delivery_notifications:
+ topic: "{{ topic_info.results[2].sns_arn }}"
+ include_headers: Yes
+ feedback_forwarding: No
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+
+ - name: assert notification settings
+ assert:
+ that:
+ - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
+ - result.notification_attributes.headers_in_bounce_notifications_enabled == True
+ - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
+ - result.notification_attributes.headers_in_delivery_notifications_enabled == True
+ - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
+ - result.notification_attributes.headers_in_complaint_notifications_enabled == True
+ - result.notification_attributes.forwarding_enabled == False
+
+ - name: re-register base email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert no change since notifications were check mode
+ assert:
+ that:
+ - result.changed == False
+ - "'bounce_topic' not in result.notification_attributes"
+ - result.notification_attributes.headers_in_bounce_notifications_enabled == False
+ - "'delivery_topic' not in result.notification_attributes"
+ - result.notification_attributes.headers_in_delivery_notifications_enabled == False
+ - "'complaint_topic' not in result.notification_attributes"
+ - result.notification_attributes.headers_in_complaint_notifications_enabled == False
+ - result.notification_attributes.forwarding_enabled == True
+
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - bounce
+ - complaint
+ - delivery
+
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test include headers on notification queues
+ block:
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ include_headers: Yes
+ complaint_notifications:
+ include_headers: Yes
+ delivery_notifications:
+ include_headers: Yes
+ <<: *aws_connection_info
+ register: result
+ - name: assert notification headers enabled
+ assert:
+ that:
+ - result.notification_attributes.headers_in_bounce_notifications_enabled == True
+ - result.notification_attributes.headers_in_complaint_notifications_enabled == True
+ - result.notification_attributes.headers_in_delivery_notifications_enabled == True
+ always:
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test disable feedback forwarding
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ with_items:
+ - bounce
+ - complaint
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ topic: "{{ topic_info.results[0].sns_arn }}"
+ complaint_notifications:
+ topic: "{{ topic_info.results[1].sns_arn }}"
+ feedback_forwarding: No
+ <<: *aws_connection_info
+ register: result
+ - name: assert feedback_forwarding == False
+ assert:
+ that:
+ - result.notification_attributes.forwarding_enabled == False
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - bounce
+ - complaint
+ - name: cleanup email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test disable feedback forwarding fails if no topics
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ feedback_forwarding: No
+ <<: *aws_connection_info
+ register: result
+ failed_when: result.failed == False
+    - name: assert error message contains "Invalid Parameter Value"
+ assert:
+ that:
+ - '"Invalid Parameter Value" in result.msg'
+ always:
+ - name: cleanup identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test disable feedback forwarding fails if no complaint topic
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-bounce"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ bounce_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ feedback_forwarding: No
+ <<: *aws_connection_info
+ register: result
+ failed_when: result.failed == False
+    - name: assert error message contains "Invalid Parameter Value"
+ assert:
+ that:
+ - '"Invalid Parameter Value" in result.msg'
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-bounce"
+ state: absent
+ <<: *aws_connection_info
+ - name: cleanup identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test disable feedback forwarding fails if no bounce topic
+ block:
+ - name: test topic
+ sns_topic:
+ name: "{{ notification_queue_name }}-complaint"
+ state: present
+ <<: *aws_connection_info
+ register: topic_info
+ - name: register email identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: present
+ complaint_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ feedback_forwarding: No
+ <<: *aws_connection_info
+ register: result
+ failed_when: result.failed == False
+    - name: assert error message contains "Invalid Parameter Value"
+ assert:
+ that:
+ - '"Invalid Parameter Value" in result.msg'
+ always:
+ - name: cleanup topics
+ sns_topic:
+ name: "{{ notification_queue_name }}-complaint"
+ state: absent
+ <<: *aws_connection_info
+ - name: cleanup identity
+ aws_ses_identity:
+ identity: "{{ email_identity }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/defaults/main.yaml
new file mode 100644
index 00000000..e77f32d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+domain_identity: "{{ resource_prefix }}.example.com"
+policy_name: "TestPolicy"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/tasks/main.yaml
new file mode 100644
index 00000000..ee10c0b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/tasks/main.yaml
@@ -0,0 +1,334 @@
+---
+# ============================================================
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+# ============================================================
+- name: test add identity policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+
+    - name: assert result.policies contains only the policy
+ assert:
+ that:
+ - result.policies|length == 1
+ - result.policies|select('equalto', policy_name)|list|length == 1
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test add duplicate identity policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: register duplicate identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+
+    - name: assert result.policies contains only the policy
+ assert:
+ that:
+ - result.policies|length == 1
+ - result.policies|select('equalto', policy_name)|list|length == 1
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test add identity policy by identity arn
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ identity_info.identity_arn }}"
+ policy_name: "{{ policy_name }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+
+    - name: assert result.policies contains only the policy
+ assert:
+ that:
+ - result.policies|length == 1
+ - result.policies|select('equalto', policy_name)|list|length == 1
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test add multiple identity policies
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}-{{ item }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+ with_items:
+ - 1
+ - 2
+ register: result
+
+    - name: assert result.policies contains both policies
+ assert:
+ that:
+ - result.results[1].policies|length == 2
+ - result.results[1].policies|select('equalto', policy_name + '-1')|list|length == 1
+ - result.results[1].policies|select('equalto', policy_name + '-2')|list|length == 1
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test add inline identity policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy:
+ Id: SampleAuthorizationPolicy
+ Version: "2012-10-17"
+ Statement:
+ - Sid: DenyAll
+ Effect: Deny
+ Resource: "{{ identity_info.identity_arn }}"
+ Principal: "*"
+ Action: "*"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+
+    - name: assert result.policies contains only the policy
+ assert:
+ that:
+ - result.policies|length == 1
+ - result.policies|select('equalto', policy_name)|list|length == 1
+
+ - name: register duplicate identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy:
+ Id: SampleAuthorizationPolicy
+ Version: "2012-10-17"
+ Statement:
+ - Sid: DenyAll
+ Effect: Deny
+ Resource: "{{ identity_info.identity_arn }}"
+ Principal: "*"
+ Action: "*"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test remove identity policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: delete identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+
+ - name: assert result.policies empty
+ assert:
+ that:
+ - result.policies|length == 0
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test remove missing identity policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: delete identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+
+ - name: assert result.policies empty
+ assert:
+ that:
+ - result.policies|length == 0
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
+# ============================================================
+- name: test add identity policy with invalid policy
+ block:
+ - name: register identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: present
+ <<: *aws_connection_info
+ register: identity_info
+
+ - name: register identity policy
+ aws_ses_identity_policy:
+ identity: "{{ domain_identity }}"
+ policy_name: "{{ policy_name }}"
+ policy: '{"noSuchAttribute": 2}'
+ state: present
+ <<: *aws_connection_info
+ register: result
+ failed_when: result.failed == False
+
+ - name: assert error.code == InvalidPolicy
+ assert:
+ that:
+ - result.error.code == 'InvalidPolicy'
+
+ always:
+ - name: clean-up identity
+ aws_ses_identity:
+ identity: "{{ domain_identity }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/templates/policy.json.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/templates/policy.json.j2
new file mode 100644
index 00000000..b198e38f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_identity_policy/templates/policy.json.j2
@@ -0,0 +1,13 @@
+{
+ "Id": "SampleAuthorizationPolicy",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "DenyAll",
+ "Effect": "Deny",
+ "Resource": "{{ identity_info.identity_arn }}",
+ "Principal": "*",
+ "Action": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/defaults/main.yaml
new file mode 100644
index 00000000..f9fecf7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+default_rule_set: "{{ resource_prefix }}-default-rule-set"
+second_rule_set: "{{ resource_prefix }}-second-rule-set"
+
+# See comment in obtain-lock.yaml for definitions of these variables
+max_obtain_lock_attempts: 10
+obtain_lock_delay_seconds: 30
+lock_timeout_seconds: 900
+lock_log_group_prefix: "ansible-testing-locks/aws_ses_rule_set"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml
new file mode 100644
index 00000000..99655e85
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml
@@ -0,0 +1,349 @@
+---
+# ============================================================
+# These tests all rely on making rule sets active. There can only be
+# a single active rule set, so multiple builds must not run these tests
+# in parallel or they will fail intermittently.
+# See the locking block in main.yaml for how this restriction is enforced.
+# ============================================================
+
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+# ============================================================
+- name: mark rule set active
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ - name: mark rule set active
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed to active
+ assert:
+ that:
+ - result.changed == True
+ - result.active == True
+ - name: remark rule set active
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: create rule set active
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed to existing and active
+ assert:
+ that:
+ - result.changed == True
+ - result.active == True
+ - "default_rule_set in result.rule_sets|map(attribute='name')"
+ - name: remark rule set active
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: mark rule set inactive
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: mark rule set inactive
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed to inactive
+ assert:
+ that:
+ - result.changed == True
+ - result.active == False
+ - name: remark rule set inactive
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Absent active flag does not change active status
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: recreate rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert not changed and still active
+ assert:
+ that:
+ - result.changed == False
+ - result.active == True
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Cannot Remove Active Rule Set
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ failed_when: "result.error.code != 'CannotDelete'"
+ - name: assert error code is CannotDelete
+ assert:
+ that:
+ - "result.error.code == 'CannotDelete'"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Remove Active Rule Set with Force
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: force remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed and absent
+ assert:
+ that:
+ - result.changed == True
+ - "default_rule_set not in result.rule_sets|map(attribute='name')"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Force Remove of Inactive Rule Set does Not Affect Active Rule Set
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: create inactive rule set
+ aws_ses_rule_set:
+ name: "{{ second_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+    - name: force remove inactive rule set
+ aws_ses_rule_set:
+ name: "{{ second_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed and absent
+ assert:
+ that:
+ - result.changed == True
+ - "second_rule_set not in result.rule_sets|map(attribute='name')"
+ - name: remark active rule set active
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert no change
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ item }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ loop:
+ - "{{ default_rule_set }}"
+ - "{{ second_rule_set }}"
+
+# ============================================================
+- name: mark rule set inactive in check mode
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: mark rule set inactive in check mode
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+ - name: assert changed to inactive
+ assert:
+ that:
+ - result.changed == True
+ - result.active == False
+ - name: remark rule set inactive
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+ register: result
+    - name: assert changed is True since the previous deactivation was in check mode
+ assert:
+ that:
+ - result.changed == True
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Cannot Remove Active Rule Set in check mode
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ failed_when: "result.error.code != 'CannotDelete'"
+ check_mode: True
+ - name: assert error code is CannotDelete
+ assert:
+ that:
+ - "result.error.code == 'CannotDelete'"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+
+# ============================================================
+- name: Remove Active Rule Set with Force in check mode
+ block:
+ - name: create active rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ - name: force remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+ - name: assert changed and absent
+ assert:
+ that:
+ - result.changed == True
+ - "default_rule_set not in result.rule_sets|map(attribute='name')"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True since previous removal was in check mode
+ assert:
+ that:
+ - result.changed == True
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml
new file mode 100644
index 00000000..155bf472
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml
@@ -0,0 +1,15 @@
+---
+# ============================================================
+# Release a lock obtained using obtain-lock.yaml
+# This should be included in the always clause of a block to
+# ensure the lock is released. See obtain-lock.yaml for more
+# details of how the locking works.
+# ============================================================
+
+- cloudwatchlogs_log_group:
+ log_group_name: "{{ lock_attempt_log_group_name }}"
+ state: absent
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml
new file mode 100644
index 00000000..4bd5250a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml
@@ -0,0 +1,187 @@
+---
+# ============================================================
+# These tests work on rule sets without making them active,
+# so multiple builds can safely run these tests in parallel.
+#
+# DO NOT ADD TESTS THAT RELY ON ACTIVE RULE SETS TO THIS FILE
+#
+# Any test that makes rule sets active must be added to
+# active-rule-set-tests.yaml, or you will see intermittent failures
+# from multiple builds interacting.
+# ============================================================
+
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+# ============================================================
+- name: test create rule sets
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed to exists inactive
+ assert:
+ that:
+ - result.changed == True
+ - result.active == False
+ - "default_rule_set in result.rule_sets|map(attribute='name')"
+ - name: recreate rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+# ============================================================
+- name: Remove No Such Rule Set
+ block:
+    - name: remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ - name: assert not changed and absent
+ assert:
+ that:
+ - result.changed == False
+ - "default_rule_set not in result.rule_sets|map(attribute='name')"
+# ============================================================
+- name: Remove Inactive Rule Set
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ - name: remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed and removed
+ assert:
+ that:
+ - result.changed == True
+ - "default_rule_set not in result.rule_sets|map(attribute='name')"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+# ============================================================
+- name: test create in check mode
+ block:
+ - name: create rule set in check mode
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+ - name: assert changed inactive and present
+ assert:
+ that:
+ - result.changed == True
+ - result.active == False
+ - "default_rule_set in result.rule_sets|map(attribute='name')"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert nothing to clean up since create was in check mode
+ assert:
+ that:
+ - result.changed == False
+# ============================================================
+- name: mark rule set active in check mode
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ - name: mark rule set active in check mode
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: True
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+ - name: assert changed and active
+ assert:
+ that:
+ - result.changed == True
+ - result.active == True
+ # We check the rule set is still inactive rather than making
+ # it active again as that way this test can be run in
+ # parallel
+ - name: Ensure rule set is inactive
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ active: False
+ <<: *aws_connection_info
+ register: result
+ - name: assert unchanged since activation was in check mode
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+# ============================================================
+- name: Remove Inactive Rule Set in check mode
+ block:
+ - name: create rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ <<: *aws_connection_info
+ - name: remove rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+ - name: assert changed and removed
+ assert:
+ that:
+ - result.changed == True
+ - "default_rule_set not in result.rule_sets|map(attribute='name')"
+ always:
+ - name: cleanup rule set
+ aws_ses_rule_set:
+ name: "{{ default_rule_set }}"
+ state: absent
+ force: True
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True since previous removal was in check mode
+ assert:
+ that:
+ - result.changed == True
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/main.yaml
new file mode 100644
index 00000000..fe0e7877
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- include_tasks: inactive-rule-set-tests.yaml
+
+# ============================================================
+# There can only be a single active rule set, so tests that
+# rely on the active state of the rule set cannot be run in
+# parallel.
+# To prevent failures due to parallel runs in the integration
+# builds, the below block creates a lock to ensure that only
+# one process will be running these tests in the same region
+# and same AWS account.
+# See obtain-lock.yaml for explanation of how the lock is
+# constructed.
+# ============================================================
+- name: Active Rule Set Tests
+ block:
+ - name: Obtain Lock
+ include_tasks: obtain-lock-wrapper.yaml
+ # Use of loop here is a workaround for lack of support for
+ # do-until loops on includes. See:
+ # https://github.com/ansible/ansible/issues/17098
+ loop: "{{ range(0, max_obtain_lock_attempts, 1)|list }}"
+ loop_control:
+ loop_var: obtain_lock_attempt
+
+ # Because of the above workaround we have to explicitly check
+ # that the lock was obtained
+ - name: Check Obtained Lock
+ assert:
+ msg: "Could not obtain lock after {{ max_obtain_lock_attempts }} attempts."
+ that: won_lock|bool
+
+ - include_tasks: active-rule-set-tests.yaml
+
+ always:
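+    # cleanup-lock.yaml deletes the lock log group, releasing the lock even if the tests above failed.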
+ - include_tasks: cleanup-lock.yaml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml
new file mode 100644
index 00000000..36969897
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml
@@ -0,0 +1,26 @@
+# ============================================================
+# Do While loops cannot be used on task includes.
+# See: https://github.com/ansible/ansible/issues/17098
+#
+# So as a workaround we use a regular loop to repeatedly attempt
+# obtaining a lock.
+#
+# For this to work we need to skip the subsequent iterations
+# once we get a lock, and delay between iterations if we
+# did not obtain the lock.
+#
+# This file encapsulates this logic to reduce the spam from
+# skipped tasks in the ansible log.
+# ============================================================
+
+- include_tasks: obtain-lock.yaml
+ # Skip obtaining a lock if we've already succeeded in getting it
+ when: "not won_lock|default(False)|bool"
+
+- name: Lock Retry Delay
+ wait_for:
+ # Add some random jitter to the delay to reduce lock contention
+ timeout: "{{ obtain_lock_delay_seconds + 15|random }}"
+ # Only delay if we're retrying, so skip the delay if we're
+ # on the last attempt or have got the lock
+ when: "obtain_lock_attempt < (max_obtain_lock_attempts - 1) and not won_lock|bool"
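+
+# If do-until loops were supported on task includes, this whole wrapper
+# would collapse to a single retrying include. A hypothetical sketch of
+# the unsupported form this file emulates (it will not run as written;
+# see the issue linked above):
+#
+#   - include_tasks: obtain-lock.yaml
+#     until: won_lock|default(False)|bool
+#     retries: "{{ max_obtain_lock_attempts }}"
+#     delay: "{{ obtain_lock_delay_seconds }}"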
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml
new file mode 100644
index 00000000..d12c57c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml
@@ -0,0 +1,126 @@
+# ============================================================
+# This file attempts to obtain a global lock (for a given
+# region / account combination).
+#
+# This makes one attempt to get the lock and will set the
+# won_lock variable to True or False to indicate whether
+# or not we got the lock.
+#
+# It's expected that this will be executed in a retry loop
+# so that if we don't get the lock we delay then try again.
+#
+# This should only be used in a block with cleanup-lock.yaml
+# included in the always clause to ensure the lock is released.
+#
+# There are several variables that control the locking behaviour:
+# * lock_timeout_seconds
+# How old a lock must be before it's assumed to be an expired
+# lock that was not cleaned up by the owner. Any locks older
+# than this will not prevent a lock being obtained and will
+# be deleted when a new process obtains the lock.
+# * lock_log_group_prefix
+# The log_group prefix that represents the lock being obtained.
+# This must be the same across all processes trying to obtain
+# the lock.
+# * lock_process_id
+# A unique identifier of this process. Each process that might
+#   attempt to obtain the lock must have a different identifier.
+# This defaults to the resource_prefix which is generally
+# appropriate.
+# * max_obtain_lock_attempts
+# How many attempts to make to get the lock before giving up
+# NB: This is actually done in main.yaml
+# * obtain_lock_delay_seconds:
+# How long to delay after failing to get the lock before
+# trying again.
+# NB: This is actually done in obtain-lock-wrapper.yaml
+#
+# The locking here is based around creating cloudwatch log groups.
+# This resource was chosen because:
+# A) it's free
+#  B) we have a built-in grouping concept because of the hierarchy
+# that allows us to easily group attempts for the same lock
+# C) the creation time is tracked and returned which gives us
+# a mechanism for deterministically picking a winner
+#
+# Each lock is represented by a log group prefix. Each attempt
+# to obtain the lock is a log group of the lock_process_id below
+# that prefix.
+#
+# The winning lock is the one with the earliest creation time.
+#
+# To prevent a hanging lock from permanently hanging the build,
+# lock attempts older than the lock timeout are ignored and
+# cleaned up by the next process to win the lock.
+# ============================================================
+
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+- name: Set lock_attempt_log_group_name
+ set_fact:
+ lock_attempt_log_group_name: "{{ lock_log_group_prefix }}/{{ lock_process_id|default(resource_prefix) }}"
+
+  # Note the overwrite below to ensure that the creation time
+  # is updated. This is important as we calculate expiry relative
+  # to the attempt creation time.
+  #
+  # Because of this it's important that we delete the attempt
+  # if we don't get the lock. Otherwise we can get a deadlock
+  # where the stale attempt from one process wins, but then,
+  # because that process has updated the creation date, it doesn't
+  # consider itself to have won.
+- name: Create Lock Attempt Log Group
+ cloudwatchlogs_log_group:
+ log_group_name: "{{ lock_attempt_log_group_name }}"
+ state: present
+ overwrite: True
+ <<: *aws_connection_info
+ register: lock_attempt_log_group_result
+
+- name: Get Lock Attempt Log Groups
+ cloudwatchlogs_log_group_info:
+ log_group_name: "{{ lock_log_group_prefix }}/"
+ <<: *aws_connection_info
+ register: lock_attempt_log_groups
+
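+  # creation_time values are reported in epoch milliseconds, which is
+  # why the lock timeout (in seconds) is multiplied by 1000 below.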
+- name: Calculate Expired Lock Attempt Timestamp
+ set_fact:
+ expired_lock_timestamp: "{{ lock_attempt_log_group_result.creation_time - (lock_timeout_seconds * 1000) }}"
+
+- name: Get Expired and Active Lock Attempts
+ set_fact:
+ expired_lock_attempts: "{{ lock_attempt_log_groups.log_groups|selectattr('creation_time', 'lt', expired_lock_timestamp|int)|list }}"
+ active_lock_attempts: "{{ lock_attempt_log_groups.log_groups|selectattr('creation_time', 'ge', expired_lock_timestamp|int)|list }}"
+
+- name: Pick Winning Lock Attempt
+ set_fact:
+ winning_lock_attempt: "{{ active_lock_attempts|sort(attribute='creation_time')|first }}"
+
+- name: Determine if Won Lock
+ set_fact:
+ won_lock: "{{ winning_lock_attempt.log_group_name == lock_attempt_log_group_name }}"
+
+ # Remove the lock attempt if we didn't get the lock. This prevents
+ # our stale lock attempt blocking another process from getting the lock.
+ # See more detailed comment above Create Lock Attempt Log Group
+- name: Remove Failed Lock Attempt Log Group
+ cloudwatchlogs_log_group:
+ log_group_name: "{{ lock_attempt_log_group_name }}"
+ state: absent
+ <<: *aws_connection_info
+ when: "not won_lock|bool"
+
+- name: Delete Expired Lock Attempts
+ cloudwatchlogs_log_group:
+ log_group_name: "{{ item.log_group_name }}"
+ state: absent
+ <<: *aws_connection_info
+ when: "won_lock|bool"
+ loop: "{{ expired_lock_attempts }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/defaults/main.yml
new file mode 100644
index 00000000..13f8ba31
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for the aws_ssm_parameter_store tests (adapted from the aws_lambda tests)
+ssm_key_prefix: '{{resource_prefix}}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml
new file mode 100644
index 00000000..0f538df9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml
@@ -0,0 +1,132 @@
+---
+#
+# Author: Michael De La Rue
+# based on aws_lambda test cases
+
+- block:
+
+ # ============================================================
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+ # ============================================================
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/Hello"
+ description: "This is your first key"
+ value: "World"
+ <<: *aws_connection_info
+
+ - name: Check that parameter was stored correctly
+ assert:
+ that:
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/Hello', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token )}}' == 'World'"
+
+ # ============================================================
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/path/wonvar"
+ description: "This is your first key"
+ value: "won value"
+ <<: *aws_connection_info
+
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/path/toovar"
+ description: "This is your first key"
+ value: "too value"
+ <<: *aws_connection_info
+
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/path/tree/treevar"
+ description: "This is your first key"
+ value: "tree value"
+ <<: *aws_connection_info
+
+ # ============================================================
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/deeppath/wondir/samevar"
+ description: "This is your first key"
+ value: "won value"
+ <<: *aws_connection_info
+
+ - name: Create or update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/deeppath/toodir/samevar"
+ description: "This is your first key"
+ value: "too value"
+ <<: *aws_connection_info
+
+ # ============================================================
+ - name: debug the lookup
+ debug:
+      msg: "{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/path', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True )}}"
+
+ - name: Check that parameter path is stored and retrieved
+ assert:
+ that:
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/path', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True, shortnames=true ) | to_json }}' == '{\"toovar\": \"too value\", \"wonvar\": \"won value\"}'"
+
+ # ============================================================
+  - name: Check that a missing parameter returns an empty value and that the default filter works
+ assert:
+ that:
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/Goodbye', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token )}}' == ''"
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/Goodbye', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token ) | default('I_can_has_default', true)}}' == 'I_can_has_default'"
+
+ # ============================================================
+ - name: Handle multiple paths with one that doesn't exist - default to full names.
+ assert:
+ that:
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/path', '/' ~ ssm_key_prefix ~ '/dont_create_this_path_you_will_break_the_ansible_tests', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True ) | to_json }}' in ( '[{\"/' ~ ssm_key_prefix ~ '/path/toovar\": \"too value\", \"/' ~ ssm_key_prefix ~ '/path/wonvar\": \"won value\"}, {}]', '[{\"/' ~ ssm_key_prefix ~ '/path/wonvar\": \"won value\", \"/' ~ ssm_key_prefix ~ '/path/toovar\": \"too value\"}, {}]' )"
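+
+  # The key order of the serialised lookup result is not guaranteed,
+  # which is why the assertion above accepts either ordering of the
+  # same mapping.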
+
+
+ # ============================================================
+ # this may be a bit of a nasty test case; we should perhaps accept _either_ value that was stored
+ # in the two variables named 'samevar'
+
+ - name: Handle multiple paths with one that doesn't exist - shortnames - including overlap.
+ assert:
+ that:
+ - "'{{lookup('amazon.aws.aws_ssm', '/' ~ ssm_key_prefix ~ '/path', '/' ~ ssm_key_prefix ~ '/dont_create_this_path_you_will_break_the_ansible_tests', '/' ~ ssm_key_prefix ~ '/deeppath', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True, shortnames=true, recursive=true ) | to_json }}' == '[{\"toovar\": \"too value\", \"treevar\": \"tree value\", \"wonvar\": \"won value\"}, {}, {\"samevar\": \"won value\"}]'"
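+
+  # Which 'samevar' wins depends on the order in which the lookup
+  # returns entries (presumably lexicographic by path, so 'wondir' is
+  # processed after 'toodir' and its value survives, as asserted above).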
+
+
+ # ============================================================
+ - name: Delete key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/Hello"
+ state: absent
+ <<: *aws_connection_info
+
+ # ============================================================
+ - name: Attempt delete key/value pair in aws parameter store again
+ aws_ssm_parameter_store:
+ name: "/{{ssm_key_prefix}}/Hello"
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert that changed is False since parameter should be deleted
+ assert:
+ that:
+ - result.changed == False
+ always:
+ # ============================================================
+ - name: Delete remaining key/value pairs in aws parameter store
+ aws_ssm_parameter_store:
+ name: "{{item}}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - "/{{ssm_key_prefix}}/Hello"
+ - "/{{ssm_key_prefix}}/path/wonvar"
+ - "/{{ssm_key_prefix}}/path/toovar"
+ - "/{{ssm_key_prefix}}/path/tree/treevar"
+ - "/{{ssm_key_prefix}}/deeppath/wondir/samevar"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/aliases
new file mode 100644
index 00000000..65b315eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+aws_step_functions_state_machine_execution
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/defaults/main.yml
new file mode 100644
index 00000000..273a0c78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/defaults/main.yml
@@ -0,0 +1,4 @@
+# the random_num is generated in a set_fact task at the start of the test suite
+state_machine_name: "{{ resource_prefix }}_step_functions_state_machine_ansible_test_{{ random_num }}"
+step_functions_role_name: "ansible-test-sts-{{ resource_prefix }}-step_functions-role"
+execution_name: "{{ resource_prefix }}_sfn_execution"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json
new file mode 100644
index 00000000..7b51bebb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json
@@ -0,0 +1,15 @@
+{
+ "StartAt": "HelloWorld",
+ "States": {
+ "HelloWorld": {
+ "Type": "Pass",
+ "Result": "Some other result",
+ "Next": "Wait"
+ },
+ "Wait": {
+ "Type": "Wait",
+ "Seconds": 30,
+ "End": true
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machine.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machine.json
new file mode 100644
index 00000000..c07d5ceb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machine.json
@@ -0,0 +1,10 @@
+{
+ "StartAt": "HelloWorld",
+ "States": {
+ "HelloWorld": {
+ "Type": "Pass",
+ "Result": "Hello World!",
+ "End": true
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
new file mode 100644
index 00000000..48d62722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "states.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/tasks/main.yml
new file mode 100644
index 00000000..005af35f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_step_functions_state_machine/tasks/main.yml
@@ -0,0 +1,303 @@
+---
+
+- name: Integration test for AWS Step Functions state machine module
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ==== Setup ==================================================
+
+ - name: Create IAM service role needed for Step Functions
+ iam_role:
+ name: "{{ step_functions_role_name }}"
+ description: Role with permissions for AWS Step Functions actions.
+ assume_role_policy_document: "{{ lookup('file', 'state_machines_iam_trust_policy.json') }}"
+ state: present
+ register: step_functions_role
+
+ - name: Pause a few seconds to ensure IAM role is available to next task
+ pause:
+ seconds: 10
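+    # (IAM is eventually consistent; a newly created role may not be
+    # visible to Step Functions immediately.)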
+
+ # ==== Tests ===================================================
+
+ - name: Create a random component for state machine name
+ set_fact:
+ random_num: "{{ 999999999 | random }}"
+
+ - name: Create a new state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: creation_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - creation_check.changed == True
+ - creation_check.output == 'State machine would be created.'
+
+ - name: Create a new state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: creation_output
+
+ - assert:
+ that:
+ - creation_output.changed == True
+
+ - name: Pause a few seconds to ensure state machine role is available
+ pause:
+ seconds: 5
+
+    - name: Idempotent rerun of same state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed == False
+ - result.output == 'State is up-to-date.'
+
+    - name: Idempotent rerun of same state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ - name: Update an existing state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','alternative_state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ differentTag: different_tag
+ state: present
+ register: update_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - update_check.changed == True
+ - "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
+
+ - name: Update an existing state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','alternative_state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ differentTag: different_tag
+ state: present
+ register: update_output
+
+ - assert:
+ that:
+ - update_output.changed == True
+ - update_output.state_machine_arn == creation_output.state_machine_arn
+
+ - name: Start execution of state machine -- check_mode
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - start_execution_output.changed == True
+ - "start_execution_output.output == 'State machine execution would be started.'"
+
+ - name: Start execution of state machine
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output
+
+ - assert:
+ that:
+ - start_execution_output.changed
+ - "'execution_arn' in start_execution_output"
+ - "'start_date' in start_execution_output"
+
+ - name: Start execution of state machine (check for idempotency) (check mode)
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_idem_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - not start_execution_output_idem_check.changed
+ - "start_execution_output_idem_check.output == 'State machine execution already exists.'"
+
+ - name: Start execution of state machine (check for idempotency)
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_idem
+
+ - assert:
+ that:
+ - not start_execution_output_idem.changed
+
+ - name: Stop execution of state machine -- check_mode
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - stop_execution_output.changed
+ - "stop_execution_output.output == 'State machine execution would be stopped.'"
+
+ - name: Stop execution of state machine
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+
+ - assert:
+ that:
+ - stop_execution_output.changed
+ - "'stop_date' in stop_execution_output"
+
+ - name: Stop execution of state machine (check for idempotency)
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+
+ - name: Try stopping a non-running execution -- check_mode
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+ - "stop_execution_output.output == 'State machine execution is not running.'"
+
+ - name: Try stopping a non-running execution
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+
+ - name: Start execution of state machine with the same execution name
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_again
+
+ - assert:
+ that:
+ - not start_execution_output_again.changed
+
+ - name: Remove state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ register: deletion_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - deletion_check.changed == True
+ - "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
+
+ - name: Remove state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ register: deletion_output
+
+ - assert:
+ that:
+ - deletion_output.changed == True
+ - deletion_output.state_machine_arn == creation_output.state_machine_arn
+
+ - name: Non-existent state machine is absent
+ aws_step_functions_state_machine:
+ name: "non_existing_state_machine"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ # ==== Cleanup ====================================================
+
+ always:
+
+ - name: Cleanup - delete state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ ignore_errors: true
+
+ - name: Cleanup - delete IAM role needed for Step Functions test
+ iam_role:
+ name: "{{ step_functions_role_name }}"
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/aliases
new file mode 100644
index 00000000..ed291667
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/aliases
@@ -0,0 +1,6 @@
+cloud/aws
+aws_waf_info
+aws_waf_web_acl
+aws_waf_web_match
+aws_waf_web_rule
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/tasks/main.yml
new file mode 100644
index 00000000..7d0c8f97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/aws_waf_web_acl/tasks/main.yml
@@ -0,0 +1,1201 @@
+- name: aws_waf tests
+ collections:
+ - amazon.aws
+ block:
+ - name: set yaml anchor
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ no_log: yes
+
+ ##################################################
+ # aws_waf_condition tests
+ ##################################################
+
+ - name: create WAF IP condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ type: ip
+ <<: *aws_connection_info
+ register: create_waf_ip_condition
+
+ - name: add an IP address to WAF condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_condition
+
+ - name: check expected waf filter length
+ assert:
+ that:
+ - add_ip_address_to_waf_condition.condition.ip_set_descriptors|length == 2
+
+ - name: add an IP address to WAF condition (rely on purge_filters defaulting to false)
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "192.168.10.0/24"
+ type: ip
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_condition_no_purge
+
+ - name: check waf filter length has increased
+ assert:
+ that:
+ - add_ip_address_to_waf_condition_no_purge.condition.ip_set_descriptors|length == 3
+ - add_ip_address_to_waf_condition_no_purge.changed
+
+ - name: add an IP address to WAF condition (set purge_filters)
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "192.168.20.0/24"
+ purge_filters: yes
+ type: ip
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_condition_purge
+
+ - name: check waf filter length has reduced
+ assert:
+ that:
+ - add_ip_address_to_waf_condition_purge.condition.ip_set_descriptors|length == 1
+ - add_ip_address_to_waf_condition_purge.changed
+
+ - name: create WAF byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+ <<: *aws_connection_info
+ register: create_waf_byte_condition
+
+ - name: recreate WAF byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+ <<: *aws_connection_info
+ register: recreate_waf_byte_condition
+
+ - name: assert that no change was made
+ assert:
+ that:
+ - not recreate_waf_byte_condition.changed
+
+ - name: create WAF geo condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_geo_condition"
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+ <<: *aws_connection_info
+ register: create_waf_geo_condition
+
+ - name: create WAF size condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+ <<: *aws_connection_info
+ register: create_waf_size_condition
+
+ - name: create WAF sql condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_sql_condition"
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+ <<: *aws_connection_info
+ register: create_waf_sql_condition
+
+ - name: create WAF xss condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_xss_condition"
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
+ <<: *aws_connection_info
+ register: create_waf_xss_condition
+
+ - name: create WAF regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ <<: *aws_connection_info
+ register: create_waf_regex_condition
+
+ - name: create a second WAF regex condition with the same regex
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ filters:
+ - field_to_match: header
+ header: cookie
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ <<: *aws_connection_info
+ register: create_second_waf_regex_condition
+
+ - name: check that the pattern is shared
+ assert:
+ that:
+ - >
+ create_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id ==
+ create_second_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
+ - create_second_waf_regex_condition.changed
+
+
+ - name: delete first WAF regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ state: absent
+ <<: *aws_connection_info
+ register: delete_waf_regex_condition
+
+ - name: delete second WAF regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ filters:
+ - field_to_match: header
+ header: cookie
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ state: absent
+ <<: *aws_connection_info
+ register: delete_second_waf_regex_condition
+
+ - name: create WAF regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ <<: *aws_connection_info
+ register: recreate_waf_regex_condition
+
+ - name: check that a new pattern is created (because the first pattern should have been deleted once unused)
+ assert:
+ that:
+ - >
+ recreate_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id !=
+ create_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
+
+ - name: create WAF Regional IP condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ type: ip
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_ip_condition
+
+ - name: add an IP address to WAF Regional condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_regional_condition
+
+ - name: check expected WAF Regional filter length
+ assert:
+ that:
+ - add_ip_address_to_waf_regional_condition.condition.ip_set_descriptors|length == 2
+
+ - name: add an IP address to WAF Regional condition (rely on purge_filters defaulting to false)
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "192.168.10.0/24"
+ type: ip
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_regional_condition_no_purge
+
+ - name: check WAF Regional filter length has increased
+ assert:
+ that:
+ - add_ip_address_to_waf_regional_condition_no_purge.condition.ip_set_descriptors|length == 3
+ - add_ip_address_to_waf_regional_condition_no_purge.changed
+
+ - name: add an IP address to WAF Regional condition (set purge_filters)
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "192.168.20.0/24"
+ purge_filters: yes
+ type: ip
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: add_ip_address_to_waf_regional_condition_purge
+
+ - name: check WAF Regional filter length has reduced
+ assert:
+ that:
+ - add_ip_address_to_waf_regional_condition_purge.condition.ip_set_descriptors|length == 1
+ - add_ip_address_to_waf_regional_condition_purge.changed
+
+ - name: create WAF Regional byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_byte_condition
+
+ - name: recreate WAF Regional byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: recreate_waf_regional_byte_condition
+
+ - name: assert that no change was made
+ assert:
+ that:
+ - not recreate_waf_regional_byte_condition.changed
+
+ - name: create WAF Regional geo condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_geo_condition"
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_geo_condition
+
+ - name: create WAF Regional size condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_size_condition
+
+ - name: create WAF Regional sql condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_sql_condition"
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_sql_condition
+
+ - name: create WAF Regional xss condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_xss_condition"
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_xss_condition
+
+ - name: create WAF Regional regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_regex_condition
+
+ - name: create a second WAF Regional regex condition with the same regex
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ filters:
+ - field_to_match: header
+ header: cookie
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_second_waf_regional_regex_condition
+
+ - name: check that the pattern is shared
+ assert:
+ that:
+ - >
+ create_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id ==
+ create_second_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
+ - create_second_waf_regional_regex_condition.changed
+
+
+ - name: delete first WAF Regional regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: delete_waf_regional_regex_condition
+
+ - name: delete second WAF Regional regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ filters:
+ - field_to_match: header
+ header: cookie
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: delete_second_waf_regional_regex_condition
+
+ - name: create WAF Regional regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: recreate_waf_regional_regex_condition
+
+ - name: check that a new pattern is created (because the first pattern should have been deleted once unused)
+ assert:
+ that:
+ - >
+ recreate_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id !=
+ create_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
+
+ ##################################################
+ # aws_waf_rule tests
+ ##################################################
+
+ - name: create WAF rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ negated: no
+ - name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ purge_conditions: yes
+ <<: *aws_connection_info
+ register: create_aws_waf_rule
+
+ - name: check WAF rule
+ assert:
+ that:
+ - create_aws_waf_rule.changed
+ - create_aws_waf_rule.rule.predicates|length == 3
+
+ - name: recreate WAF rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ negated: no
+ - name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ <<: *aws_connection_info
+ register: create_aws_waf_rule
+
+ - name: check WAF rule did not change
+ assert:
+ that:
+ - not create_aws_waf_rule.changed
+ - create_aws_waf_rule.rule.predicates|length == 3
+
+    - name: add further conditions to the WAF rule, relying on purge_conditions defaulting to false
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ negated: no
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ <<: *aws_connection_info
+ register: add_conditions_to_aws_waf_rule
+
+    - name: check conditions were added to the WAF rule
+ assert:
+ that:
+ - add_conditions_to_aws_waf_rule.changed
+ - add_conditions_to_aws_waf_rule.rule.predicates|length == 6
+
+    - name: remove some conditions through purging
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ - name: "{{ resource_prefix }}_size_condition"
+ type: size
+ negated: no
+ purge_conditions: yes
+ <<: *aws_connection_info
+ register: add_and_remove_waf_rule_conditions
+
+ - name: check WAF rules were updated as expected
+ assert:
+ that:
+ - add_and_remove_waf_rule_conditions.changed
+ - add_and_remove_waf_rule_conditions.rule.predicates|length == 4
+
+    - name: attempt to remove an in-use condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ type: size
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: remove_in_use_condition
+
+ - name: check failure was sensible
+ assert:
+ that:
+ - remove_in_use_condition.failed
+ - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
+
+ - name: create WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ negated: no
+ - name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ purge_conditions: yes
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_aws_waf_regional_rule
+
+ - name: check WAF Regional rule
+ assert:
+ that:
+ - create_aws_waf_regional_rule.changed
+ - create_aws_waf_regional_rule.rule.predicates|length == 3
+
+ - name: recreate WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ negated: no
+ - name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_aws_waf_regional_rule
+
+ - name: check WAF Regional rule did not change
+ assert:
+ that:
+ - not create_aws_waf_regional_rule.changed
+ - create_aws_waf_regional_rule.rule.predicates|length == 3
+
+    - name: add further conditions to the WAF Regional rule, relying on purge_conditions defaulting to false
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ negated: no
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: add_conditions_to_aws_waf_regional_rule
+
+    - name: check conditions were added to the WAF Regional rule
+ assert:
+ that:
+ - add_conditions_to_aws_waf_regional_rule.changed
+ - add_conditions_to_aws_waf_regional_rule.rule.predicates|length == 6
+
+    - name: remove some WAF Regional conditions through purging
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ - name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ negated: no
+ - name: "{{ resource_prefix }}_size_condition"
+ type: size
+ negated: no
+ purge_conditions: yes
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: add_and_remove_waf_regional_rule_conditions
+
+ - name: check WAF Regional rules were updated as expected
+ assert:
+ that:
+ - add_and_remove_waf_regional_rule_conditions.changed
+ - add_and_remove_waf_regional_rule_conditions.rule.predicates|length == 4
+
+    - name: attempt to remove an in-use WAF Regional condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ type: size
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: remove_in_use_condition
+
+ - name: check failure was sensible
+ assert:
+ that:
+ - remove_in_use_condition.failed
+ - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
+
+ ##################################################
+ # aws_waf_web_acl tests
+ ##################################################
+
+ - name: create web ACL
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: yes
+ state: present
+ <<: *aws_connection_info
+ register: create_web_acl
+
+ - name: recreate web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 1
+ action: block
+ default_action: block
+ state: present
+ <<: *aws_connection_info
+ register: recreate_web_acl
+
+ - name: check web acl was not changed
+ assert:
+ that:
+ - not recreate_web_acl.changed
+ - recreate_web_acl.web_acl.rules|length == 1
+
+ - name: create a second WAF rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule_2"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ negated: no
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ <<: *aws_connection_info
+
+ - name: add a new rule to the web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule_2"
+ priority: 2
+ action: allow
+ default_action: block
+ state: present
+ <<: *aws_connection_info
+ register: web_acl_add_rule
+
+ - name: check that rule was added to the web acl
+ assert:
+ that:
+ - web_acl_add_rule.changed
+ - web_acl_add_rule.web_acl.rules|length == 2
+
+ - name: use purge rules to remove the first rule
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule_2"
+ priority: 2
+ action: allow
+ purge_rules: yes
+ default_action: block
+ state: present
+ <<: *aws_connection_info
+ register: web_acl_add_rule
+
+ - name: check that rule was removed from the web acl
+ assert:
+ that:
+ - web_acl_add_rule.changed
+ - web_acl_add_rule.web_acl.rules|length == 1
+
+ - name: swap two rules of same priority
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 2
+ action: allow
+ purge_rules: yes
+ default_action: block
+ state: present
+ <<: *aws_connection_info
+ register: web_acl_swap_rule
+
+    - name: attempt to delete the in-use first rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: remove_inuse_rule
+
+ - name: check that removing in-use rule fails
+ assert:
+ that:
+ - remove_inuse_rule.failed
+
+ - name: delete the web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ state: absent
+ <<: *aws_connection_info
+ register: delete_web_acl
+
+ - name: check that web acl was deleted
+ assert:
+ that:
+ - delete_web_acl.changed
+ - not delete_web_acl.web_acl
+
+ - name: delete the no longer in use first rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ <<: *aws_connection_info
+
+ - name: create WAF Regional web ACL
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: yes
+ state: present
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: create_waf_regional_web_acl
+
+ - name: recreate WAF Regional web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 1
+ action: block
+ default_action: block
+ state: present
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: recreate_waf_regional_web_acl
+
+ - name: check WAF Regional web acl was not changed
+ assert:
+ that:
+ - not recreate_waf_regional_web_acl.changed
+ - recreate_waf_regional_web_acl.web_acl.rules|length == 1
+
+ - name: create a second WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule_2"
+ conditions:
+ - name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ negated: yes
+ - name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ negated: no
+ - name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ negated: no
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+
+ - name: add a new rule to the WAF Regional web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule_2"
+ priority: 2
+ action: allow
+ default_action: block
+ state: present
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: waf_regional_web_acl_add_rule
+
+ - name: check that rule was added to the WAF Regional web acl
+ assert:
+ that:
+ - waf_regional_web_acl_add_rule.changed
+ - waf_regional_web_acl_add_rule.web_acl.rules|length == 2
+
+ - name: use purge rules to remove the WAF Regional first rule
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule_2"
+ priority: 2
+ action: allow
+ purge_rules: yes
+ default_action: block
+ state: present
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: waf_regional_web_acl_add_rule
+
+ - name: check that rule was removed from the WAF Regional web acl
+ assert:
+ that:
+ - waf_regional_web_acl_add_rule.changed
+ - waf_regional_web_acl_add_rule.web_acl.rules|length == 1
+
+ - name: swap two WAF Regional rules of same priority
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ rules:
+ - name: "{{ resource_prefix }}_rule"
+ priority: 2
+ action: allow
+ purge_rules: yes
+ default_action: block
+ state: present
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: waf_regional_web_acl_swap_rule
+
+    - name: attempt to delete the in-use first WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: remove_waf_regional_inuse_rule
+
+ - name: check that removing WAF Regional in-use rule fails
+ assert:
+ that:
+ - remove_waf_regional_inuse_rule.failed
+
+ - name: delete the WAF Regional web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ register: delete_waf_regional_web_acl
+
+ - name: check that WAF Regional web acl was deleted
+ assert:
+ that:
+ - delete_waf_regional_web_acl.changed
+ - not delete_waf_regional_web_acl.web_acl
+
+ - name: delete the no longer in use WAF Regional first rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+
+ ##################################################
+ # TEARDOWN
+ ##################################################
+
+ always:
+ - debug:
+ msg: "****** TEARDOWN STARTS HERE ******"
+
+ - name: delete the web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ state: absent
+ purge_rules: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove second WAF rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule_2"
+ state: absent
+ purge_conditions: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ purge_conditions: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove XSS condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove SQL condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove size condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ type: size
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove geo condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ip address condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove regex part 2 condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ type: regex
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove first regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: delete the WAF Regional web acl
+ aws_waf_web_acl:
+ name: "{{ resource_prefix }}_web_acl"
+ state: absent
+ purge_rules: yes
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove second WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule_2"
+ state: absent
+ purge_conditions: yes
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional rule
+ aws_waf_rule:
+ name: "{{ resource_prefix }}_rule"
+ state: absent
+ purge_conditions: yes
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional XSS condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_xss_condition"
+ type: xss
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional SQL condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_sql_condition"
+ type: sql
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional size condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_size_condition"
+ type: size
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional geo condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_geo_condition"
+ type: geo
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional byte condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_byte_condition"
+ type: byte
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional ip address condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ type: ip
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove WAF Regional regex part 2 condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition_part_2"
+ type: regex
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove first WAF Regional regex condition
+ aws_waf_condition:
+ name: "{{ resource_prefix }}_regex_condition"
+ type: regex
+ state: absent
+ region: "{{ aws_region }}"
+ waf_regional: true
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/aliases
new file mode 100644
index 00000000..777562fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group3
+# https://github.com/ansible-collections/community.aws/issues/157
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/defaults/main.yml
new file mode 100644
index 00000000..4edd7475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/defaults/main.yml
@@ -0,0 +1 @@
+stack_name: "{{ resource_prefix }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/files/test_stack.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/files/test_stack.yml
new file mode 100644
index 00000000..f1dcba30
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/files/test_stack.yml
@@ -0,0 +1,24 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: Create some item in Exports
+Parameters:
+ TestParamValue:
+ Type: String
+ Description: A param Value to be placed in Exports
+ TestParamName:
+ Type: String
+ Description: A param Name for SSM Parameter Store
+ BucketSuffix:
+ Type: String
+Resources:
+ TestBucket:
+ Type: AWS::S3::Bucket
+ Properties:
+ BucketName:
+ Fn::Sub: "cf-export-${BucketSuffix}"
+Outputs:
+ TestParamValue:
+ Value:
+ Ref: TestParamValue
+ Export:
+ Name:
+ Fn::Sub: "${TestParamName}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml
new file mode 100644
index 00000000..eb703d49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_exports_info/tasks/main.yml
@@ -0,0 +1,41 @@
+- name: set connection information for aws modules and run tasks
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
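+  # The group/aws module_defaults above inject these credentials into every
+  # AWS module call in the block, so individual tasks only pass
+  # module-specific options.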
+ block:
+ - name: Create a minimal stack with an export set by parameter
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','test_stack.yml') }}"
+ template_parameters:
+ TestParamName: "cf-exports-param"
+ TestParamValue: "Set By CF Exports"
+ BucketSuffix: "{{ resource_prefix }}"
+ register: cf_stack
+ - name: Read from Exports
+ cloudformation_exports_info:
+ region: "{{ aws_region }}"
+ register: exports_result
+ - set_fact:
+ export_items: "{{ exports_result['export_items'] }}"
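+    # export_items is returned as a dict keyed by export name, so the export
+    # created above ("cf-exports-param") should appear as a key holding the
+    # value set by the stack parameter.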
+ - assert:
+ that:
+ - export_items is defined
+ - export_items['cf-exports-param'] is defined
+ # - export_items | length == 1
+
+  # Cleanup
+ always:
+
+ - name: delete stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml
new file mode 100644
index 00000000..dfbc5224
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml
@@ -0,0 +1,6 @@
+AWSTemplateFormatVersion: "2010-09-09"
+Parameters: {}
+Resources:
+ Bukkit:
+ Type: "AWS::S3::Bucket"
+ Properties: {}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml
new file mode 100644
index 00000000..68df61c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml
@@ -0,0 +1,9 @@
+AWSTemplateFormatVersion: "2010-09-09"
+Parameters: {}
+Resources:
+ Bukkit:
+ Type: "AWS::S3::Bucket"
+ Properties: {}
+ other:
+ Type: "AWS::SNS::Topic"
+ Properties: {}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/playbooks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/playbooks/full_test.yml
new file mode 100644
index 00000000..257e1e48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/playbooks/full_test.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+
+ roles:
+ - ../../cloudformation_stack_set
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/runme.sh
new file mode 100755
index 00000000..d499c679
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Run full test suite
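+# virtualenv.sh is a helper supplied by the ansible-test harness; sourcing it
+# activates an isolated virtualenv before the boto dependencies are installed.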
+source virtualenv.sh
+pip install 'botocore>1.10.26' boto3
+ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml
new file mode 100644
index 00000000..cb6aa2cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudformation_stack_set/tasks/main.yml
@@ -0,0 +1,190 @@
+---
+# tasks file for cloudformation_stack_set module tests
+# These tests require access to two separate AWS accounts
+
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ aws_secondary_connection_info: &aws_secondary_connection_info
+ aws_access_key: "{{ secondary_aws_access_key }}"
+ aws_secret_key: "{{ secondary_aws_secret_key }}"
+ security_token: "{{ secondary_security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
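+# The tasks below splice these credentials in with YAML anchor merges; for
+# example (a minimal sketch mirroring the tasks that follow), a task written as
+#
+#   - aws_caller_info:
+#       <<: *aws_connection_info
+#
+# expands to the full aws_access_key/aws_secret_key/security_token/region set
+# defined above.
+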
+- name: cloudformation_stack_set tests
+ collections:
+ - amazon.aws
+
+ block:
+ - name: Get current account ID
+ aws_caller_info:
+ <<: *aws_connection_info
+ register: whoami
+    - name: Get target account ID
+ aws_caller_info:
+ <<: *aws_secondary_connection_info
+ register: target_acct
+
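+    # These roles exercise StackSets' self-managed permissions model: an
+    # administration role in the primary account assumes CfnStackSetExecRole
+    # in each target account, so the admin side first needs sts:AssumeRole on
+    # that role name.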
+ - name: Policy to allow assuming stackset execution role
+ iam_managed_policy:
+ policy_name: AssumeCfnStackSetExecRole
+ state: present
+ <<: *aws_connection_info
+ policy:
+ Version: '2012-10-17'
+ Statement:
+ - Action: 'sts:AssumeRole'
+ Effect: Allow
+ Resource: arn:aws:iam::*:role/CfnStackSetExecRole
+ policy_description: Assume CfnStackSetExecRole
+
+ - name: Create an execution role for us to use
+ iam_role:
+ name: CfnStackSetExecRole
+ <<: *aws_secondary_connection_info
+ assume_role_policy_document:
+ Version: '2012-10-17'
+ Statement:
+ - Action: 'sts:AssumeRole'
+ Effect: Allow
+ Principal:
+ AWS: '{{ whoami.account }}'
+ managed_policy:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+ - name: Create an administration role for us to use
+ iam_role:
+ name: CfnStackSetAdminRole
+ <<: *aws_connection_info
+ assume_role_policy_document:
+ Version: '2012-10-17'
+ Statement:
+ - Action: 'sts:AssumeRole'
+ Effect: Allow
+ Principal:
+ Service: 'cloudformation.amazonaws.com'
+ managed_policy:
+ - arn:aws:iam::{{ whoami.account }}:policy/AssumeCfnStackSetExecRole
+ #- arn:aws:iam::aws:policy/PowerUserAccess
+
+ - name: Should fail without account/regions
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetOne
+ description: TestStack Prime
+ tags:
+ Some: Thing
+ Type: Test
+ wait: true
+ template: test_bucket_stack.yml
+ register: result
+ ignore_errors: true
+ - name: assert that running with no account fails
+ assert:
+ that:
+ - result is failed
+ - >
+ "Can't create a stack set without choosing at least one account" in result.msg
+ - name: Should fail without roles
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetOne
+ description: TestStack Prime
+ tags:
+ Some: Thing
+ Type: Test
+ wait: true
+ regions:
+ - '{{ aws_region }}'
+ accounts:
+ - '{{ whoami.account }}'
+ template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
+ register: result
+ ignore_errors: true
+    - name: assert that running without roles fails
+ assert:
+ that:
+ - result is failed
+
+    - name: Delete the execution role from the primary account
+ iam_role:
+ name: CfnStackSetExecRole
+ state: absent
+ <<: *aws_connection_info
+ assume_role_policy_document:
+ Version: '2012-10-17'
+ Statement:
+ - Action: 'sts:AssumeRole'
+ Effect: Allow
+ Principal:
+ AWS: arn:aws:iam::{{ whoami.account }}:root
+ managed_policy:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+ - name: Create stack with roles
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetTwo
+ description: TestStack Dos
+ tags:
+ Some: Thing
+ Type: Test
+ wait: true
+ regions:
+ - '{{ aws_region }}'
+ accounts:
+ - '{{ target_acct.account }}'
+ exec_role_name: CfnStackSetExecRole
+ admin_role_arn: arn:aws:iam::{{ whoami.account }}:role/CfnStackSetAdminRole
+ template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
+ register: result
+
+ - name: Update stack with roles
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetTwo
+ description: TestStack Dos
+ tags:
+ Some: Thing
+ Type: Test
+ wait: true
+ regions:
+ - '{{ aws_region }}'
+ accounts:
+ - '{{ target_acct.account }}'
+ exec_role_name: CfnStackSetExecRole
+ admin_role_arn: arn:aws:iam::{{ whoami.account }}:role/CfnStackSetAdminRole
+ template_body: '{{ lookup("file", "test_modded_bucket_stack.yml") }}'
+ always:
+ - name: Clean up stack one
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetOne
+ wait: true
+ regions:
+ - '{{ aws_region }}'
+ accounts:
+ - '{{ whoami.account }}'
+ purge_stacks: true
+ state: absent
+ - name: Clean up stack two
+ cloudformation_stack_set:
+ <<: *aws_connection_info
+ name: TestSetTwo
+ description: TestStack Dos
+ purge_stacks: true
+ tags:
+ Some: Thing
+ Type: Test
+ wait: true
+ regions:
+ - '{{ aws_region }}'
+ accounts:
+ - '{{ target_acct.account }}'
+ template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/defaults/main.yml
new file mode 100644
index 00000000..b88dbc24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/defaults/main.yml
@@ -0,0 +1,49 @@
+cloudfront_hostname: "{{ resource_prefix }}01"
+
+# Use a domain that has a wildcard DNS
+# Using an alias requires also having an SSL cert...
+#cloudfront_alias: "{{ cloudfront_hostname }}.github.io"
+#cloudfront_viewer_cert:
+# acm_certificate_arn: ...
+# certificate: ...
+# certificate_source: ...
+# minimum_protocol_version: ...
+# ssl_support_method: ...
+
+cloudfront_test_cache_behaviors:
+ - path_pattern: /test/path
+ forwarded_values:
+ headers:
+ - Host
+ - X-HTTP-Forwarded-For
+ - CloudFront-Forwarded-Proto
+ - Origin
+ - Referer
+ allowed_methods:
+ items:
+ - GET
+ - HEAD
+ - POST
+ - PATCH
+ - PUT
+ - OPTIONS
+ - DELETE
+ cached_methods:
+ - GET
+ - HEAD
+ - path_pattern: /another/path
+ forwarded_values:
+ cookies:
+ forward: whitelist
+ whitelisted_names:
+ - my_header
+ query_string: yes
+ query_string_cache_keys:
+ - whatever
+ allowed_methods:
+ items:
+ - GET
+ - HEAD
+ cached_methods:
+ - GET
+ - HEAD
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/meta/main.yml
new file mode 100644
index 00000000..32cf5dda
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml
new file mode 100644
index 00000000..62c141cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudfront_distribution/tasks/main.yml
@@ -0,0 +1,442 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ cloudfront_distribution:
+ alias: "{{ cloudfront_alias | default(omit) }}"
+ viewer_certificate: "{{ cloudfront_viewer_cert | default(omit) }}"
+ collections:
+ - amazon.aws
+
+ block:
+
+ - name: create cloudfront distribution using defaults
+ cloudfront_distribution:
+ origins:
+ - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
+ id: "{{ cloudfront_hostname }}-origin.example.com"
+ default_cache_behavior:
+ target_origin_id: "{{ cloudfront_hostname }}-origin.example.com"
+ state: present
+ purge_origins: yes
+ register: cf_distribution
+
+ - set_fact:
+ distribution_id: '{{ cf_distribution.id }}'
+
+ - name: re-run cloudfront distribution with same defaults
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
+ state: present
+ register: cf_dist_no_update
+
+ - name: ensure distribution was not updated
+ assert:
+ that:
+ - not cf_dist_no_update.changed
+
+ - name: re-run cloudfront distribution using distribution id
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ purge_origins: no
+ state: present
+ register: cf_dist_with_id
+
+ - name: ensure distribution was not updated
+ assert:
+ that:
+ - not cf_dist_with_id.changed
+
+ - name: update origin http port
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
+ custom_origin_config:
+ http_port: 8080
+ state: present
+ register: update_origin_http_port
+
+ - name: ensure http port was updated
+ assert:
+ that:
+ - update_origin_http_port.changed
+
+ - name: update restrictions
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ restrictions:
+ geo_restriction:
+ restriction_type: "whitelist"
+ items:
+ - "US"
+ state: present
+ register: update_restrictions
+
+  - name: ensure restrictions were updated
+ assert:
+ that:
+ - update_restrictions.changed
+
+ - name: set a random comment
+ set_fact:
+      comment: "{{ 'ABCDEFabcdef123456' | shuffle | join }}"
+
+ - name: update comment
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ comment: "{{ comment }}"
+ state: present
+ register: cf_comment
+
+ - name: ensure comment was updated
+ assert:
+ that:
+ - cf_comment.changed
+ - 'cf_comment.comment == comment'
+
+ - name: create second origin
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ id: "{{ resource_prefix }}2.example.com"
+ default_root_object: index.html
+ state: present
+ wait: yes
+ register: cf_add_origin
+
+ - name: ensure origin was added
+ assert:
+ that:
+ - cf_add_origin.origins.quantity == 2
+ - cf_add_origin.changed
+ - "cf_add_origin.default_root_object == 'index.html'"
+
+ - name: re-run second origin
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
+ custom_origin_config:
+ http_port: 8080
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ default_root_object: index.html
+ wait: yes
+ state: present
+ register: cf_rerun_second_origin
+
+ - name: ensure nothing changed after re-run
+ assert:
+ that:
+ - cf_rerun_second_origin.origins.quantity == 2
+ - not cf_rerun_second_origin.changed
+
+ - name: run with origins in reverse order
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
+ custom_origin_config:
+ http_port: 8080
+ state: present
+ register: cf_rerun_second_origin_reversed
+
+ - name: ensure nothing changed after reversed re-run
+ assert:
+ that:
+ - cf_rerun_second_origin_reversed.origins.quantity == 2
+ - not cf_rerun_second_origin_reversed.changed
+
+
+ - name: purge first origin
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ default_cache_behavior:
+ target_origin_id: "{{ resource_prefix }}2.example.com"
+ purge_origins: yes
+ state: present
+ register: cf_purge_origin
+
+ - name: ensure origin was removed
+ assert:
+ that:
+ - cf_purge_origin.origins.quantity == 1
+ - cf_purge_origin.changed
+
+ - name: update default_root_object of existing distribution
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ default_root_object: index.php
+ state: present
+ register: cf_update_default_root_object
+
+  - name: ensure default_root_object was updated
+ assert:
+ that:
+ - "cf_update_default_root_object.default_root_object == 'index.php'"
+ - cf_update_default_root_object.changed
+
+ - name: add tags to existing distribution
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ tags:
+ ATag: tag1
+ Another: tag
+ default_root_object: index.php
+ state: present
+ register: cf_add_tags
+
+ - name: ensure tags were added
+ assert:
+ that:
+ - cf_add_tags.changed
+ - cf_add_tags.tags|length == 2
+
+ - name: delete distribution
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ enabled: no
+ wait: yes
+ state: absent
+
+ - name: create distribution with tags
+ cloudfront_distribution:
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ id: "{{ resource_prefix }}2.example.com"
+ tags:
+ ATag: tag1
+ Another: tag
+ state: present
+ register: cf_second_distribution
+
+ - set_fact:
+ distribution_id: '{{ cf_second_distribution.id }}'
+
+ - name: ensure tags were set on creation
+ assert:
+ that:
+ - cf_second_distribution.changed
+ - cf_second_distribution.tags|length == 2
+ - "'ATag' in cf_second_distribution.tags"
+ - "'Another' in cf_second_distribution.tags"
+
+ - name: re-run create distribution with same tags and purge_tags
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ id: "{{ resource_prefix }}2.example.com"
+ tags:
+ ATag: tag1
+ Another: tag
+ purge_tags: yes
+ state: present
+ register: rerun_with_purge_tags
+
+ - name: ensure that re-running didn't change
+ assert:
+ that:
+ - not rerun_with_purge_tags.changed
+ - rerun_with_purge_tags.tags|length == 2
+
+ - name: add new tag to distribution
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ tags:
+ Third: thing
+ purge_tags: no
+ state: present
+ register: update_with_new_tag
+
+ - name: ensure tags are correct
+ assert:
+ that:
+ - update_with_new_tag.changed
+ - "'Third' in update_with_new_tag.tags"
+ - "'Another' in update_with_new_tag.tags"
+ - "'ATag' in update_with_new_tag.tags"
+ - update_with_new_tag.tags|length == 3
+
+ - name: create some cache behaviors
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ cache_behaviors: "{{ cloudfront_test_cache_behaviors }}"
+ state: present
+ register: add_cache_behaviors
+
+ - name: reverse some cache behaviors
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ cache_behaviors: "{{ cloudfront_test_cache_behaviors|reverse|list }}"
+ state: present
+ register: reverse_cache_behaviors
+
+ - name: check that reversing cache behaviors changes nothing when purge_cache_behaviors unset
+ assert:
+ that:
+ - not reverse_cache_behaviors.changed
+ - reverse_cache_behaviors.cache_behaviors|length == 2
+
+ - name: reverse some cache behaviors properly
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}2.example.com"
+ cache_behaviors: "{{ cloudfront_test_cache_behaviors|reverse|list }}"
+ purge_cache_behaviors: yes
+ state: present
+ register: reverse_cache_behaviors_with_purge
+
+  - name: check that reversing cache behaviors changes when purge_cache_behaviors is set
+ assert:
+ that:
+ - reverse_cache_behaviors_with_purge.changed
+ - reverse_cache_behaviors_with_purge.cache_behaviors|length == 2
+
+ - name: update origin that changes target id (failure expected)
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}3.example.com"
+ id: "{{ resource_prefix }}3.example.com"
+ purge_origins: yes
+ state: present
+ register: remove_origin_in_use
+ ignore_errors: yes
+
+  - name: check that removing an in-use origin fails
+ assert:
+ that:
+ - remove_origin_in_use.failed
+
+ # FIXME: This currently fails due to AWS side problems
+ # not clear whether to hope they fix or prevent this issue from happening
+ #- name: update origin and update cache behavior to point to new origin
+ # cloudfront_distribution:
+ # origins:
+ # - domain_name: "{{ resource_prefix }}3.example.com"
+ # id: "{{ resource_prefix }}3.example.com"
+ # cache_behaviors:
+ # - path_pattern: /test/path
+ # target_origin_id: "{{ resource_prefix }}3.example.com"
+ # - path_pattern: /another/path
+ # target_origin_id: "{{ resource_prefix }}3.example.com"
+ # state: present
+ # aws_access_key: "{{ aws_access_key|default(omit) }}"
+ # aws_secret_key: "{{ aws_secret_key|default(omit) }}"
+ # security_token: "{{ security_token|default(omit) }}"
+ # profile: "{{ profile|default(omit) }}"
+  #  register: update_cache_behaviors_in_use
+
+ - name: create an s3 bucket for next test
+    # note: although public-read allows the reads we want origin_access_identity
+    # to stop, we also need to test without it, and bucket perms are hard to change later
+ aws_s3:
+ bucket: "{{ resource_prefix }}-bucket"
+ mode: create
+
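+  # An origin access identity (OAI) is a CloudFront principal that can be
+  # granted read access to an otherwise-private bucket; with
+  # s3_origin_access_identity_enabled the module attaches one to the origin,
+  # which the assert after the next task checks for.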
+ - name: update origin to point to the s3 bucket
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
+ id: "{{ resource_prefix }}3.example.com"
+ s3_origin_access_identity_enabled: yes
+ state: present
+ register: update_origin_to_s3
+
+ - name: check that s3 origin access is in result
+ assert:
+ that:
+ - item.s3_origin_config.origin_access_identity.startswith('origin-access-identity/cloudfront/')
+ when: "'s3_origin_config' in item"
+ loop: "{{ update_origin_to_s3.origins['items'] }}"
+
+ - name: update origin to remove s3 origin access identity
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
+ id: "{{ resource_prefix }}3.example.com"
+ s3_origin_access_identity_enabled: no
+ state: present
+ register: update_origin_to_s3_without_origin_access
+
+ - name: check that s3 origin access is not in result
+ assert:
+ that:
+ - not item.s3_origin_config.origin_access_identity
+ when: "'s3_origin_config' in item"
+ loop: "{{ update_origin_to_s3_without_origin_access.origins['items'] }}"
+
+ - name: delete the s3 bucket
+ aws_s3:
+ bucket: "{{ resource_prefix }}-bucket"
+ mode: delete
+
+ - name: check that custom_origin_config can't be used with origin_access_identity enabled
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
+ id: "{{ resource_prefix }}3.example.com"
+ s3_origin_access_identity_enabled: yes
+ custom_origin_config:
+ origin_protocol_policy: 'http-only'
+ state: present
+ register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config
+ ignore_errors: True
+
+ - name: check that custom origin with origin access identity fails
+ assert:
+ that:
+ - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed
+
+ - name: Update distribution to use specific access identity
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ origins:
+ - id: "{{ resource_prefix }}"
+ domain_name: "{{ resource_prefix }}.s3.amazonaws.com"
+ s3_origin_access_identity_enabled: true
+ s3_origin_config:
+ origin_access_identity: origin-access-identity/cloudfront/ANYTHING
+ register: update_distribution_with_specific_access_identity
+
+ - name: check that custom origin uses the provided origin_access_identity
+ assert:
+ that:
+ - update_distribution_with_specific_access_identity.changed
+      - update_distribution_with_specific_access_identity.origins['items'][0].s3_origin_config.origin_access_identity == 'origin-access-identity/cloudfront/ANYTHING'
+
+ always:
+ # TEARDOWN STARTS HERE
+ - name: delete the s3 bucket
+ aws_s3:
+ bucket: "{{ resource_prefix }}-bucket"
+ mode: delete
+
+ - name: clean up cloudfront distribution
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ enabled: no
+ wait: yes
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/defaults/main.yml
new file mode 100644
index 00000000..7338e364
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/defaults/main.yml
@@ -0,0 +1,7 @@
+cloudtrail_name: '{{ resource_prefix }}-cloudtrail'
+s3_bucket_name: '{{ resource_prefix }}-cloudtrail-bucket'
+kms_alias: '{{ resource_prefix }}-cloudtrail'
+sns_topic: '{{ resource_prefix }}-cloudtrail-notifications'
+cloudtrail_prefix: 'test-prefix'
+cloudwatch_log_group: '{{ resource_prefix }}-cloudtrail'
+cloudwatch_role: '{{ resource_prefix }}-cloudtrail'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/tasks/main.yml
new file mode 100644
index 00000000..8aa695b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/tasks/main.yml
@@ -0,0 +1,1425 @@
+---
+# General Tests:
+# - s3_bucket_name required when state is 'present'
+# - Creation / Deletion
+# - Enable/Disable logging
+# - Enable/Disable log file validation option
+# - Manipulation of Global Event logging option
+# - Manipulation of Multi-Region logging option
+# - Manipulation of S3 bucket option
+# - Manipulation of Encryption option
+# - Manipulation of SNS options
+# - Manipulation of CloudWatch Log group options
+# - Manipulation of Tags
+#
+# Notes:
+# - results include the updates, even when check_mode is true
+# - Poor handling of disable global + enable multi-region
+# botocore.errorfactory.InvalidParameterCombinationException: An error
+# occurred (InvalidParameterCombinationException) when calling the
+# UpdateTrail operation: Multi-Region trail must include global service
+# events.
+#   - Using a blank string for the KMS ID doesn't remove encryption
+#   - Using a blank string for the SNS Topic doesn't remove it
+#   - Using a blank string for the CloudWatch Log Group / Role doesn't remove them
+#
+# Possible Bugs:
+# - output.exists == false when creating
+# - Changed reports true when using a KMS alias
+#   - Tag keys are being lower-cased
+
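+# As an example of the InvalidParameterCombinationException note above, an
+# update like the following sketch (names are placeholders, not one of the
+# tests below) is rejected by the API:
+#
+#   - cloudtrail:
+#       state: present
+#       name: example-trail
+#       s3_bucket_name: example-bucket
+#       include_global_events: no
+#       is_multi_region_trail: yes
+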
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ # Add this as a default because we (almost) always need it
+ cloudtrail:
+ s3_bucket_name: '{{ s3_bucket_name }}'
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+ # Argument Tests
+ # ============================================================
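+  # Overriding module_defaults with an empty mapping at task level strips the
+  # play-level s3_bucket_name default, letting the required-argument check fire.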
+ - name: 'S3 Bucket required when state is "present"'
+ module_defaults: { cloudtrail: {} }
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+ - '"s3_bucket_name" in output.msg'
+
+ - name: 'CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_role_arn: 'SomeValue'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+ - '"parameters are required together" in output.msg'
+ - '"cloudwatch_logs_log_group_arn" in output.msg'
+
+ - name: 'CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: 'SomeValue'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+ - '"parameters are required together" in output.msg'
+ - '"cloudwatch_logs_role_arn" in output.msg'
+
+ #- name: 'Global Logging must be enabled when enabling Multi-region'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # include_global_events: no
+ # is_multi_region_trail: yes
+ # register: output
+ # ignore_errors: yes
+ #- assert:
+ # that:
+ # - output is failed
+
+ # ============================================================
+ # Preparation
+ # ============================================================
+ - name: 'Retrieve caller facts'
+ aws_caller_info: {}
+ register: aws_caller_info
+
+ - name: 'Create S3 bucket'
+ vars:
+ bucket_name: '{{ s3_bucket_name }}'
+ s3_bucket:
+ state: present
+ name: '{{ bucket_name }}'
+ policy: '{{ lookup("template", "s3-policy.j2") }}'
+ - name: 'Create second S3 bucket'
+ vars:
+ bucket_name: '{{ s3_bucket_name }}-2'
+ s3_bucket:
+ state: present
+ name: '{{ bucket_name }}'
+ policy: '{{ lookup("template", "s3-policy.j2") }}'
+
+ - name: 'Create SNS Topic'
+ vars:
+ sns_topic_name: '{{ sns_topic }}'
+ sns_topic:
+ state: present
+ name: '{{ sns_topic_name }}'
+ display_name: 'Used for testing SNS/CloudWatch integration'
+ policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
+ register: output_sns_topic
+ - name: 'Create second SNS Topic'
+ vars:
+ sns_topic_name: '{{ sns_topic }}-2'
+ sns_topic:
+ state: present
+ name: '{{ sns_topic_name }}'
+ display_name: 'Used for testing SNS/CloudWatch integration'
+ policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
+
+ - name: 'Create KMS Key'
+ aws_kms:
+ state: present
+ alias: '{{ kms_alias }}'
+ enabled: yes
+ policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
+ register: kms_key
+ - name: 'Create second KMS Key'
+ aws_kms:
+ state: present
+ alias: '{{ kms_alias }}-2'
+ enabled: yes
+ policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
+ register: kms_key2
+
+ - name: 'Create CloudWatch IAM Role'
+ iam_role:
+ state: present
+ name: '{{ cloudwatch_role }}'
+ assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}"
+ register: output_cloudwatch_role
+ - name: 'Create CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ cloudwatch_log_group }}'
+ retention: 1
+ register: output_cloudwatch_log_group
+ - name: 'Create second CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ cloudwatch_log_group }}-2'
+ retention: 1
+ register: output_cloudwatch_log_group2
+ - name: 'Add inline policy to CloudWatch Role'
+ iam_policy:
+ state: present
+ iam_type: role
+ iam_name: '{{ cloudwatch_role }}'
+ policy_name: 'CloudWatch'
+ policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}"
+
+ # ============================================================
+ # Tests
+ # ============================================================
+
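+  # Each option below is exercised as a triad: a CHECK MODE run (must report
+  # changed without touching the trail), a real update (changed), and a
+  # re-run with identical arguments (not changed) to prove idempotency.
+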
+ - name: 'Create a trail (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Create a trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ # XXX This appears to be a bug...
+ #- output.exists == True
+ - output.trail.name == cloudtrail_name
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == True
+ # Check everything is what we expect before we start making changes
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == False
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == False
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix is none
+ - output.trail.kms_key_id is none
+ - output.trail.sns_topic_arn is none
+ - output.trail.sns_topic_name is none
+ - output.trail.tags | length == 0
+
+ # ============================================================
+
+ - name: 'Set S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'Set S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'Update S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.s3_key_prefix == cloudtrail_prefix ~ "-2"
+
+ - name: 'Update S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.s3_key_prefix == cloudtrail_prefix ~ "-2"
+
+ - name: 'Remove S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Remove S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix is none
+
+ - name: 'Remove S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix is none
+
+ # ============================================================
+
+ - name: 'Add Tag (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag1: Value1
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Add Tag'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag1: Value1
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 1
+ - '("tag1" in output.trail.tags) and (output.trail.tags["tag1"] == "Value1")'
+
+ - name: 'Add Tag (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag1: Value1
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 1
+ - '("tag1" in output.trail.tags) and (output.trail.tags["tag1"] == "Value1")'
+
+ - name: 'Change tags (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Change tags'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 1
+ - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
+
+ - name: 'Change tags (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 1
+ - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
+
+ - name: 'Change tags (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ Tag3: Value3
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Change tags'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ Tag3: Value3
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 2
+ - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
+ #- '("Tag3" in output.trail.tags) and (output.trail.tags["Tag3"] == "Value3")'
+
+ - name: 'Change tags (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ tags:
+ tag2: Value2
+ Tag3: Value3
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 2
+ - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
+ #- '("Tag3" in output.trail.tags) and (output.trail.tags["Tag3"] == "Value3")'
+
+ - name: 'Remove tags (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Remove tags'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 0
+
+ - name: 'Remove tags (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.tags | length == 0
+
+ # ============================================================
+
+ - name: 'Set SNS Topic (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set SNS Topic'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'Set SNS Topic (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'Update SNS Topic (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update SNS Topic'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.sns_topic_name == sns_topic ~ "-2"
+
+ - name: 'Update SNS Topic (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.sns_topic_name == sns_topic ~ "-2"
+
+ #- name: 'Remove SNS Topic (CHECK MODE)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ # check_mode: yes
+ #- assert:
+ # that:
+ # - output is changed
+
+ #- name: 'Remove SNS Topic'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.sns_topic_name is none
+
+ #- name: 'Remove SNS Topic (no change)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is not changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.sns_topic_name is none
+
+
+ # ============================================================
+
+ - name: 'Set CloudWatch Log Group (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set CloudWatch Log Group'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Set CloudWatch Log Group (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ #- name: 'Remove CloudWatch Log Group (CHECK MODE)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ # check_mode: yes
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ #- name: 'Remove CloudWatch Log Group'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ #- name: 'Remove CloudWatch Log Group (no change)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is not changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ # ============================================================
+
+ - name: 'Update S3 bucket (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update S3 bucket'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.s3_bucket_name == s3_bucket_name ~ "-2"
+
+ - name: 'Update S3 bucket (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+      - output.trail.s3_bucket_name == s3_bucket_name ~ "-2"
+
+ - name: 'Reset S3 bucket'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_bucket_name == s3_bucket_name
+
+ # ============================================================
+
+ - name: 'Disable logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == False
+
+ - name: 'Disable logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == False
+
+ # Ansible Documentation lists logging as explicitly defaulting to enabled
+
+ - name: 'Enable logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == True
+
+ - name: 'Enable logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == True
+
+ # ============================================================
+
+ - name: 'Disable global logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable global logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == False
+
+ - name: 'Disable global logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == False
+
+ # Ansible Documentation lists Global-logging as explicitly defaulting to enabled
+
+ - name: 'Enable global logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable global logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == True
+
+ - name: 'Enable global logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == True
+
+ # ============================================================
+
+ - name: 'Enable multi-region logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable multi-region logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == True
+
+ - name: 'Enable multi-region logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == True
+
+ # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled
+
+ - name: 'Disable multi-region logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable multi-region logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == False
+
+ - name: 'Disable multi-region logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == False
+
+ # ============================================================
+
+ - name: 'Enable logfile validation (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logfile validation'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'Enable logfile validation (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'Disable logfile validation (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable logfile validation'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == False
+
+ - name: 'Disable logfile validation (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == False
+
+ # ============================================================
+
+ - name: 'Enable logging encryption (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logging encryption'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Enable logging encryption (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Update logging encryption key (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update logging encryption key'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key2.key_arn
+
+ - name: 'Update logging encryption key (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key2.key_arn
+
+ - name: 'Update logging encryption to alias (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update logging encryption to alias'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Update logging encryption to alias (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ - assert:
+ that:
+ # - output is not changed
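+        # (disabled above: the module appears to compare the requested alias with the key ARN returned by AWS, so "is not changed" cannot be asserted reliably here)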
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ #- name: 'Disable logging encryption (CHECK MODE)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # kms_key_id: ''
+ # register: output
+ # check_mode: yes
+ #- assert:
+ # that:
+ # - output is changed
+
+ #- name: 'Disable logging encryption'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # kms_key_id: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output.trail.kms_key_id == None
+ # - output is changed
+
+ #- name: 'Disable logging encryption (no change)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # kms_key_id: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output.kms_key_id == None
+ # - output is not changed
+
+ # ============================================================
+
+ - name: 'Delete a trail without providing bucket_name (CHECK MODE)'
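+    # module_defaults is overridden with an empty dict so any s3_bucket_name default set at the block level is not injected, making this a genuine no-bucket-name delete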
+ module_defaults: { cloudtrail: {} }
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Delete a trail while providing bucket_name (CHECK MODE)'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Delete a trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.exists == False
+
+ - name: 'Delete a non-existent trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == False
+
+ # ============================================================
+
+ - name: 'Test creation of a complex Trail (all features)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ sns_topic_name: '{{ sns_topic }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ is_multi_region_trail: yes
+ include_global_events: yes
+ enable_log_file_validation: yes
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ #- output.exists == True
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == True
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == True
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+ - output.trail.kms_key_id == kms_key.key_arn
+ - output.trail.sns_topic_arn == output_sns_topic.sns_arn
+ - output.trail.sns_topic_name == sns_topic
+ - output.trail.tags | length == 0
+
+ - name: 'Test creation of a complex Trail (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ sns_topic_name: '{{ sns_topic }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ is_multi_region_trail: yes
+ include_global_events: yes
+ enable_log_file_validation: yes
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == True
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == True
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == True
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+ - output.trail.kms_key_id == kms_key.key_arn
+ - output.trail.sns_topic_arn == output_sns_topic.sns_arn
+ - output.trail.sns_topic_name == sns_topic
+ - output.trail.tags | length == 0
+
+ always:
+ # ============================================================
+ # Cleanup
+ # ============================================================
+ - name: 'Delete test trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ ignore_errors: yes
+ - name: 'Delete S3 bucket'
+ s3_bucket:
+ state: absent
+ name: '{{ s3_bucket_name }}'
+ force: yes
+ ignore_errors: yes
+ - name: 'Delete second S3 bucket'
+ s3_bucket:
+ state: absent
+ name: '{{ s3_bucket_name }}-2'
+ force: yes
+ ignore_errors: yes
+ - name: 'Delete KMS Key'
+ aws_kms:
+ state: absent
+ alias: '{{ kms_alias }}'
+ ignore_errors: yes
+ - name: 'Delete second KMS Key'
+ aws_kms:
+ state: absent
+ alias: '{{ kms_alias }}-2'
+ ignore_errors: yes
+ - name: 'Delete SNS Topic'
+ sns_topic:
+ state: absent
+ name: '{{ sns_topic }}'
+ ignore_errors: yes
+ - name: 'Delete second SNS Topic'
+ sns_topic:
+ state: absent
+ name: '{{ sns_topic }}-2'
+ ignore_errors: yes
+ - name: 'Delete CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ cloudwatch_log_group }}'
+ ignore_errors: yes
+ - name: 'Delete second CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ cloudwatch_log_group }}-2'
+ ignore_errors: yes
+      - name: 'Remove inline policy from CloudWatch Role'
+ iam_policy:
+ state: absent
+ iam_type: role
+ iam_name: '{{ cloudwatch_role }}'
+ policy_name: 'CloudWatch'
+ ignore_errors: yes
+ - name: 'Delete CloudWatch IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ cloudwatch_role }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
new file mode 100644
index 00000000..6d7fb7b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AssumeFromCloudTrails",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "cloudtrail.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
new file mode 100644
index 00000000..8f354a70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
@@ -0,0 +1,17 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "CloudTrail2CloudWatch",
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": [
+ "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}:log-stream:*",
+ "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}-2:log-stream:*"
+ ]
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2
new file mode 100644
index 00000000..35730f1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2012-10-17",
+ "Id": "CloudTrailPolicy",
+ "Statement": [
+ {
+ "Sid": "EncryptLogs",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "kms:GenerateDataKey*",
+ "Resource": "*",
+ "Condition": {
+ "StringLike": {
+ "kms:EncryptionContext:aws:cloudtrail:arn": [
+ "arn:aws:cloudtrail:*:{{ aws_caller_info.account }}:trail/{{ resource_prefix }}*"
+ ]
+ }
+ }
+ },
+ {
+ "Sid": "DescribeKey",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "kms:DescribeKey",
+ "Resource": "*"
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2
new file mode 100644
index 00000000..78c056e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "CloudTrailCheckAcl",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "s3:GetBucketAcl",
+ "Resource": "arn:aws:s3:::{{ bucket_name }}",
+ },
+ {
+ "Sid": "CloudTrailWriteLogs",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "s3:PutObject",
+ "Resource": [
+ "arn:aws:s3:::{{ bucket_name }}/AWSLogs/{{ aws_caller_info.account }}/*",
+ "arn:aws:s3:::{{ bucket_name }}/{{ cloudtrail_prefix }}*/AWSLogs/{{ aws_caller_info.account }}/*"
+ ],
+ "Condition": {
+ "StringEquals": {
+ "s3:x-amz-acl": "bucket-owner-full-control"
+ }
+ }
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
+ "Action": "*",
+ "Resource": "arn:aws:s3:::{{ bucket_name }}"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2
new file mode 100644
index 00000000..3c267b80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2008-10-17",
+ "Id": "AnsibleSNSTesting",
+ "Statement": [
+ {
+ "Sid": "CloudTrailSNSPolicy",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "cloudtrail.amazonaws.com"
+ },
+ "Action": "sns:Publish",
+ "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "sns:Subscribe",
+ "sns:ListSubscriptionsByTopic",
+ "sns:DeleteTopic",
+ "sns:GetTopicAttributes",
+ "sns:Publish",
+ "sns:RemovePermission",
+ "sns:AddPermission",
+ "sns:Receive",
+ "sns:SetTopicAttributes"
+ ],
+ "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/aliases
new file mode 100644
index 00000000..500ca9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group1
+cloudwatchlogs_log_group
+cloudwatchlogs_log_group_metric_filter
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml
new file mode 100644
index 00000000..c6db709f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+log_group_name: '{{ resource_prefix }}/integrationtest'
+filter_name: '{{ resource_prefix }}/AnsibleTest' \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml
new file mode 100644
index 00000000..a36c6643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml
@@ -0,0 +1,157 @@
+---
+
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+ - name: create cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+
+ - name: check_mode set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: "$.value"
+ check_mode: yes
+ register: out
+
+ - name: check_mode state must be changed
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters | count == 1
+
+ - name: set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: "$.value"
+ register: out
+
+ - name: create metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters | count == 1
+
+ - name: re-set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: "$.value"
+ register: out
+
+ - name: metric filter must not change
+ assert:
+ that:
+ - out is not changed
+
+ - name: update metric transformation on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: made_with_ansible
+ metric_value: "$.value"
+ default_value: 3.1415
+ register: out
+
+ - name: update metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters[0].metric_namespace == "made_with_ansible"
+ - out.metric_filters[0].default_value == 3.1415
+
+ - name: update filter_pattern on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: made_with_ansible
+ metric_value: "$.value"
+ register: out
+
+ - name: update metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters[0].metric_namespace == "made_with_ansible"
+
+    - name: check_mode delete metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ check_mode: yes
+ register: out
+
+ - name: check_mode state must be changed
+ assert:
+ that:
+ - out is changed
+
+ - name: delete metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ register: out
+
+ - name: delete metric filter
+ assert:
+ that:
+ - out is changed
+
+ - name: delete metric filter on '{{ log_group_name }}' which does not exist
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ register: out
+
+ - name: delete metric filter
+ assert:
+ that:
+ - out is not changed
+
+ always:
+ - name: delete metric filter
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+
+ - name: delete cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test.sh
new file mode 100755
index 00000000..4e7aa8dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default and C locale.
+
+ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml
new file mode 100644
index 00000000..21699422
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection/test_connection.yml
@@ -0,0 +1,43 @@
+- hosts: "{{ target_hosts }}"
+ gather_facts: no
+ serial: 1
+ tasks:
+
+ ### raw with unicode arg and output
+
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+ - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+
+ ### copy local file with unicode filename and content
+
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+ ### fetch remote file with unicode filename and content
+
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+ ### remove local and remote temp files
+
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+ ### test wait_for_connection plugin
+ - wait_for_connection:
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aliases
new file mode 100644
index 00000000..0031909d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aliases
@@ -0,0 +1,7 @@
+cloud/aws
+destructive
+shippable/aws/group4
+non_local
+needs/root
+needs/target/connection
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml
new file mode 100644
index 00000000..7cd735b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - role: aws_ssm_integration_test_setup
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md
new file mode 100644
index 00000000..bc12a83e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md
@@ -0,0 +1,43 @@
+# AWS SSM Integration Test Setup
+
+## aws_ssm_integration_test_setup
+
+This Ansible role, together with its teardown counterpart, runs the integration tests for the aws_ssm connection plugin. The pair performs the following actions:
+
+- Creates AWS resources in a user-specified region.
+- Runs the integration tests against the aws_ssm connection plugin.
+- Tears down/removes the AWS resources that were created for testing the plugin.
+
+### Prerequisites
+
+- Make sure the machine used for testing already has the Ansible repo with the ssm connection plugin.
+- An AWS CLI profile or IAM role configured on the machine, with permissions to spin up AWS resources.
+
+### Variables referenced in the Ansible role
+
+The following table provides details about the variables referenced within the role.
+
+| Variable Name | Details |
+| ------ | ------ |
+| aws_region | Name of the AWS region |
+| iam_role_name | Name of the IAM role that will be attached to the newly-created EC2 instance |
+| iam_policy_name | Name of the IAM policy that will be attached to the IAM role above |
+| instance_type | Instance type used for creating the EC2 instance |
+| instance_id | AWS EC2 instance ID (populated by the role) |
+| bucket_name | Name of the S3 bucket used by SSM (populated by the role) |
+
+### Example Playbook
+
+A sample playbook demonstrating the usage of the role. (Make sure the respective variables are passed as parameters; a sketch of passing them inline follows the example below.)
+
+```yaml
+ - hosts: localhost
+ roles:
+      - aws_ssm_integration_test_setup
+```
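+
+For instance, the user-supplied variables can be passed inline where the role is applied. This is only a sketch; the region and instance type values below are placeholders:
+
+```yaml
+  - hosts: localhost
+    roles:
+      - role: aws_ssm_integration_test_setup
+        vars:
+          aws_region: us-east-1    # placeholder region
+          instance_type: t2.micro  # same as the role default
+```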
+
+#### Authors' Information
+
+Krishna Nand Choudhary (krishnanandchoudhary)
+Nikhil Araga (araganik)
+Gaurav Ashtikar (gau1991)
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml
new file mode 100644
index 00000000..f158bf3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+instance_type: t2.micro
+linux_ami_name: amzn-ami-hvm-2018.03.0.20190611-x86_64-ebs
+# Windows AMIs get replaced every few months, so don't be too specific
+windows_ami_name: Windows_Server-2019-English-Full-Base-*
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json
new file mode 100644
index 00000000..63d22eae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml
new file mode 100644
index 00000000..ff497ef3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml
@@ -0,0 +1,19 @@
+- name: Download SSM plugin
+ get_url:
+ url: https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_64bit/session-manager-plugin.deb
+ dest: /tmp/session-manager-plugin.deb
+ mode: '0440'
+ tags: setup_infra
+- name: Extract SSM plugin Deb File
+ shell: ar x session-manager-plugin.deb
+ args:
+ chdir: /tmp
+ tags: setup_infra
+- name: Extract SSM Plugin Data Archive
+ shell: tar -zxvf data.tar.gz -C /
+ args:
+ chdir: /tmp
+ tags: setup_infra
+- name: Check the SSM Plugin
+ shell: /usr/local/sessionmanagerplugin/bin/session-manager-plugin --version
+ tags: setup_infra
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml
new file mode 100644
index 00000000..dae7e277
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml
@@ -0,0 +1,156 @@
+---
+## Task file for setup/teardown AWS resources for aws_ssm integration testing
+- block:
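+    # "&aws_connection_info" defines a YAML anchor; later tasks merge these keys via "<<: *aws_connection_info"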
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{aws_access_key}}"
+ aws_secret_key: "{{aws_secret_key}}"
+ security_token: "{{security_token}}"
+ region: "{{aws_region}}"
+ no_log: yes
+
+ - name: AMI Lookup
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ item }}'
+ <<: *aws_connection_info
+ register: ec2_amis
+ loop:
+ - '{{ linux_ami_name }}'
+ - '{{ windows_ami_name }}'
+
+ - name: Set facts with latest AMIs
+ vars:
+ latest_linux_ami: '{{ ec2_amis.results[0].images | sort(attribute="creation_date") | last }}'
+ latest_windows_ami: '{{ ec2_amis.results[1].images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ linux_ami_id: '{{ latest_linux_ami.image_id }}'
+ windows_ami_id: '{{ latest_windows_ami.image_id }}'
+
+ - name: Install Session Manager Plugin for Debian/Ubuntu
+ include_tasks: debian.yml
+ when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
+ register: install_plugin_debian
+
+ - name: Install Session Manager Plugin for RedHat/Amazon
+ include_tasks: redhat.yml
+ when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat" or ansible_distribution == "Amazon"
+ register: install_plugin_redhat
+
+ - name: Fail if the plugin was not installed
+ fail:
+ msg: The distribution does not contain the required Session Manager Plugin
+ when:
+ - install_plugin_debian is skipped
+ - install_plugin_redhat is skipped
+
+ - name: Install Boto3
+ pip:
+ name: boto3
+
+ - name: Install Boto
+ pip:
+ name: boto
+
+ - name: Ensure IAM instance role exists
+ iam_role:
+ name: "ansible-test-{{resource_prefix}}-aws-ssm-role"
+ assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}"
+ state: present
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2RoleforSSM
+ <<: *aws_connection_info
+ register: role_output
+
+ - name: Create S3 bucket
+ s3_bucket:
+ name: "{{resource_prefix}}-aws-ssm-s3"
+ <<: *aws_connection_info
+ register: s3_output
+
+    - name: Wait for the IAM role to be created
+ pause:
+ seconds: 10
+
+ - name: Create Linux EC2 instance
+ ec2:
+ instance_type: "{{instance_type}}"
+ image: "{{linux_ami_id}}"
+ wait: "yes"
+ count: 1
+ instance_profile_name: "{{role_output.iam_role.role_name}}"
+ instance_tags:
+ Name: "{{resource_prefix}}-integration-test-aws-ssm-linux"
+ user_data: |
+ #!/bin/sh
+ sudo systemctl start amazon-ssm-agent
+ state: present
+ <<: *aws_connection_info
+ register: linux_output
+
+ - name: Create Windows EC2 instance
+ ec2:
+ instance_type: "{{instance_type}}"
+ image: "{{windows_ami_id}}"
+ wait: "yes"
+ count: 1
+ instance_profile_name: "{{role_output.iam_role.role_name}}"
+ instance_tags:
+ Name: "{{resource_prefix}}-integration-test-aws-ssm-windows"
+ user_data: |
+ <powershell>
+ Invoke-WebRequest -Uri "https://amazon-ssm-us-east-1.s3.amazonaws.com/latest/windows_amd64/AmazonSSMAgentSetup.exe" -OutFile "C:\AmazonSSMAgentSetup.exe"
+ Start-Process -FilePath C:\AmazonSSMAgentSetup.exe -ArgumentList "/S","/v","/qn" -Wait
+ Restart-Service AmazonSSMAgent
+ </powershell>
+ state: present
+ <<: *aws_connection_info
+ register: windows_output
+
+ - name: Wait for EC2 to be available
+ wait_for_connection:
+ delay: 300
+
+ - name: Create Inventory file for Linux host
+ template:
+ dest: "{{playbook_dir}}/inventory-linux.aws_ssm"
+ src: inventory-linux.aws_ssm.j2
+
+ - name: Create Inventory file for Windows host
+ template:
+ dest: "{{playbook_dir}}/inventory-windows.aws_ssm"
+ src: inventory-windows.aws_ssm.j2
+
+    - name: Create AWS keys environment file
+ template:
+ dest: "{{playbook_dir}}/aws-env-vars.sh"
+ src: aws-env-vars.j2
+ no_log: yes
+
+ always:
+ - name: Create EC2 Linux vars_to_delete.yml
+ template:
+ dest: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
+ src: ec2_linux_vars_to_delete.yml.j2
+ ignore_errors: yes
+
+ - name: Create EC2 Windows vars_to_delete.yml
+ template:
+ dest: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
+ src: ec2_windows_vars_to_delete.yml.j2
+ ignore_errors: yes
+
+ - name: Create S3 vars_to_delete.yml
+ template:
+ dest: "{{playbook_dir}}/s3_vars_to_delete.yml"
+ src: s3_vars_to_delete.yml.j2
+ ignore_errors: yes
+
+ - name: Create IAM Role vars_to_delete.yml
+ template:
+ dest: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
+ src: iam_role_vars_to_delete.yml.j2
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml
new file mode 100644
index 00000000..d111b4d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml
@@ -0,0 +1,11 @@
+- name: Download SSM plugin
+ get_url:
+ url: https://s3.amazonaws.com/session-manager-downloads/plugin/latest/linux_64bit/session-manager-plugin.rpm
+ dest: /tmp/session-manager-plugin.rpm
+ mode: '0440'
+ tags: setup_infra
+- name: Install SSM Plugin
+ yum:
+ name: /tmp/session-manager-plugin.rpm
+ state: present
+ tags: setup_infra
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2
new file mode 100644
index 00000000..1e3821ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2
@@ -0,0 +1,4 @@
+export AWS_ACCESS_KEY_ID={{aws_access_key}}
+export AWS_SECRET_ACCESS_KEY={{aws_secret_key}}
+export AWS_SECURITY_TOKEN={{security_token}}
+export AWS_REGION={{aws_region}}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2
new file mode 100644
index 00000000..8af1e3b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2
@@ -0,0 +1,2 @@
+---
+linux_instance_id: {{linux_output.instance_ids[0]}}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2
new file mode 100644
index 00000000..d216f372
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2
@@ -0,0 +1,2 @@
+---
+windows_instance_id: {{windows_output.instance_ids[0]}}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2
new file mode 100644
index 00000000..0d87d3ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2
@@ -0,0 +1,2 @@
+---
+iam_role_name: {{role_output.iam_role.role_name}}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2
new file mode 100644
index 00000000..7e97e5f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2
@@ -0,0 +1,12 @@
+[aws_ssm]
+{{linux_output.instance_ids[0]}} ansible_aws_ssm_instance_id={{linux_output.instance_ids[0]}} ansible_aws_ssm_region={{aws_region}}
+
+[aws_ssm:vars]
+ansible_connection=aws_ssm
+ansible_aws_ssm_bucket_name={{s3_output.name}}
+ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugin
+ansible_python_interpreter=/usr/bin/env python
+
+# support tests that target testhost
+[testhost:children]
+aws_ssm
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2
new file mode 100644
index 00000000..0b6a28c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2
@@ -0,0 +1,12 @@
+[aws_ssm]
+{{windows_output.instance_ids[0]}} ansible_aws_ssm_instance_id={{windows_output.instance_ids[0]}} ansible_aws_ssm_region={{aws_region}}
+
+[aws_ssm:vars]
+ansible_shell_type=powershell
+ansible_connection=aws_ssm
+ansible_aws_ssm_bucket_name={{s3_output.name}}
+ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugin
+
+# support tests that target testhost
+[testhost:children]
+aws_ssm
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2
new file mode 100644
index 00000000..3839fb3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2
@@ -0,0 +1,2 @@
+---
+bucket_name: {{s3_output.name}}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml
new file mode 100644
index 00000000..13c62c1f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - role: aws_ssm_integration_test_teardown
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md
new file mode 100644
index 00000000..bc12a83e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md
@@ -0,0 +1,43 @@
+# AWS SSM Integration Test Teardown
+
+## aws_ssm_integration_test_teardown
+
+This Ansible role is the teardown counterpart of aws_ssm_integration_test_setup; together the two roles run the integration tests for the aws_ssm connection plugin. The pair performs the following actions:
+
+- Creates AWS resources in a user-specified region.
+- Runs the integration tests against the aws_ssm connection plugin.
+- Tears down/removes the AWS resources that were created for testing the plugin.
+
+### Prerequisites
+
+- Make sure the machine used for testing already has the Ansible repo with the ssm connection plugin.
+- An AWS CLI profile or IAM role configured on the machine, with permissions to spin up AWS resources.
+
+### Variables referenced in the Ansible role
+
+The following table provides details about the variables referenced within the role.
+
+| Variable Name | Details |
+| ------ | ------ |
+| aws_region | Name of the AWS region |
+| iam_role_name | Name of the IAM role that will be attached to the newly-created EC2 instance |
+| iam_policy_name | Name of the IAM policy that will be attached to the IAM role above |
+| instance_type | Instance type used for creating the EC2 instance |
+| instance_id | AWS EC2 instance ID (populated by the role) |
+| bucket_name | Name of the S3 bucket used by SSM (populated by the role) |
+
+### Example Playbook
+
+A sample playbook demonstrating the usage of the role. (Make sure the respective variables are passed as parameters; a chaining sketch follows the example below.)
+
+```yaml
+ - hosts: localhost
+ roles:
+      - aws_ssm_integration_test_teardown
+```
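+
+As a usage sketch (the wrapper file name is hypothetical), the setup and teardown playbooks shipped alongside these roles can be chained with import_playbook; this assumes the setup run has written the *_vars_to_delete.yml files that the teardown tasks include:
+
+```yaml
+# run_aws_ssm_tests.yml (hypothetical wrapper)
+- import_playbook: aws_ssm_integration_test_setup.yml
+- import_playbook: aws_ssm_integration_test_teardown.yml
+```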
+
+#### Authors' Information
+
+Krishna Nand Choudhary (krishnanandchoudhary)
+Nikhil Araga (araganik)
+Gaurav Ashtikar (gau1991)
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml
new file mode 100644
index 00000000..7993733b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml
@@ -0,0 +1,85 @@
+---
+- name: Set up AWS connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{aws_access_key}}"
+ aws_secret_key: "{{aws_secret_key}}"
+ region: "{{aws_region}}"
+ security_token: "{{security_token}}"
+ no_log: true
+
+- name: Check if ec2_linux_vars_to_delete.yml is present
+ stat:
+ path: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
+ register: ec2_linux_vars_file
+
+- name: Include variable file to delete EC2 Linux infra
+ include_vars: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
+  when: ec2_linux_vars_file.stat.exists
+
+- name: Check if ec2_windows_vars_to_delete.yml is present
+ stat:
+ path: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
+ register: ec2_windows_vars_file
+
+- name: Include variable file to delete EC2 Windows infra
+ include_vars: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
+  when: ec2_windows_vars_file.stat.exists
+
+- name: Check if s3_vars_to_delete.yml is present
+ stat:
+ path: "{{playbook_dir}}/s3_vars_to_delete.yml"
+ register: s3_vars_file
+
+- name: Include variable file to delete S3 infra
+ include_vars: "{{playbook_dir}}/s3_vars_to_delete.yml"
+  when: s3_vars_file.stat.exists
+
+- name: Check if iam_role_vars_to_delete.yml is present
+ stat:
+ path: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
+ register: iam_role_vars_file
+
+- name: Include variable file to delete IAM Role infra
+ include_vars: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
+  when: iam_role_vars_file.stat.exists
+
+- name: Terminate Windows EC2 instances that were previously launched
+ ec2:
+ instance_ids:
+ - "{{windows_instance_id}}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+  when: ec2_windows_vars_file.stat.exists
+
+- name: Terminate Linux EC2 instances that were previously launched
+ ec2:
+ instance_ids:
+ - "{{linux_instance_id}}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+  when: ec2_linux_vars_file.stat.exists
+
+- name: Delete S3 bucket
+ aws_s3:
+ bucket: "{{bucket_name}}"
+ mode: delete
+ <<: *aws_connection_info
+ ignore_errors: yes
+  when: s3_vars_file.stat.exists
+
+- name: Delete IAM role
+ iam_role:
+ name: "{{iam_role_name}}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+  when: iam_role_vars_file.stat.exists
+
+- name: Delete AWS keys environment file
+ file:
+ path: "{{playbook_dir}}/aws-env-vars.sh"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/inventory.aws_ssm.template b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/inventory.aws_ssm.template
new file mode 100644
index 00000000..afbee1ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/inventory.aws_ssm.template
@@ -0,0 +1,10 @@
+[aws_ssm]
+@NAME ansible_aws_ssm_instance_id=@HOST ansible_aws_ssm_region=@AWS_REGION
+
+[aws_ssm:vars]
+ansible_connection=aws_ssm
+ansible_aws_ssm_bucket_name=@S3_BUCKET
+
+# support tests that target testhost
+[testhost:children]
+aws_ssm
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/runme.sh
new file mode 100755
index 00000000..1d9b3873
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/connection_aws_ssm/runme.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -eux
+
+CMD_ARGS=("$@")
+
+# Destroy Environment
+cleanup() {
+
+ cd ../connection_aws_ssm
+
+ ansible-playbook -c local aws_ssm_integration_test_teardown.yml "${CMD_ARGS[@]}"
+
+}
+
+trap "cleanup" EXIT
+
+# Setup Environment
+ansible-playbook -c local aws_ssm_integration_test_setup.yml "$@"
+
+# Export the AWS Keys
+set +x
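+# xtrace stays off while sourcing the credentials so the keys are not echoed into the log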
+. ./aws-env-vars.sh
+set -x
+
+cd ../connection
+
+# Execute Integration tests for Linux
+INVENTORY=../connection_aws_ssm/inventory-linux.aws_ssm ./test.sh \
+ -e target_hosts=aws_ssm \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ -e action_prefix= \
+ "$@"
+
+# Execute Integration tests for Windows
+INVENTORY=../connection_aws_ssm/inventory-windows.aws_ssm ./test.sh \
+ -e target_hosts=aws_ssm \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=c:/windows/temp/ansible-remote \
+ -e action_prefix=win_ \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml
new file mode 100644
index 00000000..18eb5728
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_endpoint/tasks/main.yml
@@ -0,0 +1,139 @@
+---
+
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ region: "{{ aws_region }}"
+ dms_identifier: "{{ resource_prefix }}-dms"
+ no_log: yes
+
+- name: dms_endpoint tests
+ collections:
+ - amazon.aws
+ block:
+ - name: create endpoints
+ dms_endpoint:
+ state: present
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: create endpoints no change
+ dms_endpoint:
+ state: present
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
+
+ - name: update endpoints
+ dms_endpoint:
+ state: present
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb2'
+ sslmode: none
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: update endpoints no change
+ dms_endpoint:
+ state: present
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb2'
+ sslmode: none
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
+
+ always:
+ - name: delete endpoints
+ dms_endpoint:
+ state: absent
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ wait: True
+ timeout: 60
+ retries: 10
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: delete endpoints no change
+ dms_endpoint:
+ state: absent
+ endpointidentifier: "{{ dms_identifier }}"
+ endpointtype: source
+ enginename: aurora
+ username: testing
+ password: testint1234
+ servername: "{{ resource_prefix }}.exampledomain.com"
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ wait: False
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/defaults/main.yml
new file mode 100644
index 00000000..feed0f4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/defaults/main.yml
@@ -0,0 +1,2 @@
+resource_prefix: "test_dms_sg"
+dms_role_role_name: dms-vpc-role \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json
new file mode 100644
index 00000000..69ee87ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "dms.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml
new file mode 100644
index 00000000..e0708c31
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/dms_replication_subnet_group/tasks/main.yml
@@ -0,0 +1,192 @@
+---
+
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ region: "{{ aws_region }}"
+ dms_sg_identifier: "{{ resource_prefix }}-dms"
+ no_log: yes
+
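+# '&aws_connection_info' defines a YAML anchor; the tasks below splice these
+# credentials in with the merge key '<<: *aws_connection_info', which expands
+# to the aws_access_key/aws_secret_key/region trio at that point, e.g.:
+#   dms_replication_subnet_group:
+#     state: present
+#     <<: *aws_connection_info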
+- name: dms_replication_subnet_group tests
+ collections:
+ - amazon.aws
+ block:
+
+ - name: ensure IAM role exists
+ iam_role:
+ <<: *aws_connection_info
+ name: "{{ dms_role_role_name }}"
+ assume_role_policy_document: "{{ lookup('file','dmsAssumeRolePolicyDocument.json') }}"
+ state: present
+ create_instance_profile: no
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole'
+ register: iam_role_output
+ ignore_errors: yes
+
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ <<: *aws_connection_info
+ register: testing_vpc
+
+ - name: 'Fetch AZ availability'
+ aws_az_info:
+ <<: *aws_connection_info
+ register: az_info
+
+ - name: 'Assert that we have multiple AZs available to us'
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: 'Pick AZs'
+ set_fact:
+ az_one: '{{ az_info.availability_zones[0].zone_name }}'
+ az_two: '{{ az_info.availability_zones[1].zone_name }}'
+
+ - name: create subnet1
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.16/28
+ az: "{{ az_one }}"
+ <<: *aws_connection_info
+ register: subnet1
+
+ - name: create subnet2
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.32/28
+ az: "{{ az_two }}"
+ <<: *aws_connection_info
+ register: subnet2
+
+ - name: create replication subnet group
+ dms_replication_subnet_group:
+ state: present
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: create subnet group no change
+ dms_replication_subnet_group:
+ state: present
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
+
+ - name: update subnet group
+ dms_replication_subnet_group:
+ state: present
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group updated"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: update subnet group no change
+ dms_replication_subnet_group:
+ state: present
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group updated"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
+
+ always:
+    - name: delete subnet group
+ dms_replication_subnet_group:
+ state: absent
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group updated"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+
+ - name: delete subnet group no change
+ dms_replication_subnet_group:
+ state: absent
+ identifier: "{{ dms_sg_identifier }}"
+ description: "Development Subnet Group updated"
+ subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is not failed
+
+ - name: delete subnet1
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.16/28
+ az: "{{ az_one }}"
+ <<: *aws_connection_info
+
+ - name: delete subnet2
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.32/28
+ az: "{{ az_two }}"
+ <<: *aws_connection_info
+
+ - name: delete VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ state: absent
+ <<: *aws_connection_info
+
+ - name: delete dms-vpc role
+ iam_role:
+ <<: *aws_connection_info
+ name: "{{ dms_role_role_name }}"
+ assume_role_policy_document: "{{ lookup('file','dmsAssumeRolePolicyDocument.json') }}"
+ state: absent
+ create_instance_profile: no
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole'
+ register: iam_role_output
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/defaults/main.yml
new file mode 100644
index 00000000..80bf25cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_asg
+# Amazon Linux 2 AMI 2019.06.12 (HVM), GP2 Volume Type
+ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/tasks/main.yml
new file mode 100644
index 00000000..aa53e968
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/tasks/main.yml
@@ -0,0 +1,787 @@
+---
+# tasks file for test_ec2_asg
+
+- name: Test incomplete credentials with ec2_asg
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+
+ - name: test invalid profile
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ region: "{{ aws_region }}"
+ profile: notavalidprofile
+ ignore_errors: yes
+ register: result
+
+    - name: assert an invalid profile is rejected
+ assert:
+ that:
+ - "'The config profile (notavalidprofile) could not be found' in result.msg"
+
+ - name: test partial credentials
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ ignore_errors: yes
+ register: result
+
+    - name: assert partial credentials are rejected
+ assert:
+ that:
+ - "'Partial credentials found in explicit, missing: aws_secret_access_key' in result.msg"
+
+ - name: test without specifying region
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ ignore_errors: yes
+ register: result
+
+    - name: assert a region is required
+ assert:
+ that:
+ - result.msg == 'The ec2_asg module requires a region and none was found in configuration, environment variables or module parameters'
+
+ # ============================================================
+
+- name: Test incomplete arguments with ec2_asg
+
+ block:
+
+ # ============================================================
+
+ - name: test without specifying required module options
+ ec2_asg:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ ignore_errors: yes
+ register: result
+
+ - name: assert name is a required module option
+ assert:
+ that:
+ - "result.msg == 'missing required arguments: name'"
+
+- name: Run ec2_asg integration tests.
+
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - set_fact:
+ ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
+
+ - name: load balancer name has to be less than 32 characters
+    # the 8-digit identifier at the end of resource_prefix makes it possible to
+    # tell which test run created a given resource
+ set_fact:
+ load_balancer_name: "{{ item }}-lb"
+ loop: "{{ resource_prefix | regex_findall('.{8}$') }}"
+
+ # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations
+
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.55.77.0/24
+ tenancy: default
+ register: testing_vpc
+
+ - name: Create internet gateway for use in testing
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: present
+ register: igw
+
+ - name: Create subnet for use in testing
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.55.77.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: testing_subnet
+
+ - name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet.subnet.id }}"
+
+ - name: create a security group with the vpc created in the ec2_setup
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
+
+ - name: ensure launch configs exist
+ ec2_lc:
+ name: "{{ item }}"
+ assign_public_ip: true
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ packages:
+ - httpd
+ runcmd:
+ - "service httpd start"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: t3.micro
+ loop:
+ - "{{ resource_prefix }}-lc"
+ - "{{ resource_prefix }}-lc-2"
+
+ # ============================================================
+
+ - name: launch asg and wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ state: present
+ wait_for_instances: yes
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 1"
+
+ - name: Tag asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ tags:
+ - tag_a: 'value 1'
+ propagate_at_launch: no
+ - tag_b: 'value 2'
+ propagate_at_launch: yes
+ register: output
+
+ - assert:
+ that:
+ - "output.tags | length == 2"
+ - output is changed
+
+ - name: Re-Tag asg (different order)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ tags:
+ - tag_b: 'value 2'
+ propagate_at_launch: yes
+ - tag_a: 'value 1'
+ propagate_at_launch: no
+ register: output
+
+ - assert:
+ that:
+ - "output.tags | length == 2"
+ - output is not changed
+
+ - name: Re-Tag asg new tags
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ tags:
+ - tag_c: 'value 3'
+ propagate_at_launch: no
+ register: output
+
+ - assert:
+ that:
+ - "output.tags | length == 1"
+ - output is changed
+
+ - name: Re-Tag asg update propagate_at_launch
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ tags:
+ - tag_c: 'value 3'
+ propagate_at_launch: yes
+ register: output
+
+ - assert:
+ that:
+ - "output.tags | length == 1"
+ - output is changed
+
+ - name: Enable metrics collection
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ metrics_collection: yes
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: Enable metrics collection (check idempotency)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ metrics_collection: yes
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ - name: Disable metrics collection
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ metrics_collection: no
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: Disable metrics collection (check idempotency)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ metrics_collection: no
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ # - name: pause for a bit to make sure that the group can't be trivially deleted
+ # pause: seconds=30
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ wait_timeout: 800
+ async: 400
+
+ # ============================================================
+
+ - name: launch asg and do not wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: no
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 0"
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ wait_timeout: 800
+ register: output
+ retries: 3
+ until: output is succeeded
+ delay: 10
+ async: 400
+
+ # ============================================================
+
+ - name: create asg with asg metrics enabled
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ metrics_collection: true
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 0
+ min_size: 0
+ max_size: 0
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - "'Group' in output.metrics_collection.0.Metric"
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ wait_timeout: 800
+ async: 400
+
+ # ============================================================
+
+ - name: launch load balancer
+ ec2_elb_lb:
+ name: "{{ load_balancer_name }}"
+ state: present
+ security_group_ids:
+ - "{{ sg.group_id }}"
+ subnets: "{{ testing_subnet.subnet.id }}"
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: "/"
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ register: load_balancer
+
+ - name: launch asg and wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ health_check_period: 300
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ load_balancers: "{{ load_balancer_name }}"
+ wait_for_instances: yes
+ wait_timeout: 900
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 1"
+
+ # ============================================================
+
+ # grow scaling group to 3
+
+ - name: add 2 more instances wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 3
+ max_size: 5
+ health_check_period: 600
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ load_balancers: "{{ load_balancer_name }}"
+ wait_for_instances: yes
+ wait_timeout: 1200
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+ # Test max_instance_lifetime option
+
+ - name: enable asg max_instance_lifetime
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ max_instance_lifetime: 604801
+ register: output
+
+ - name: ensure max_instance_lifetime is set
+ assert:
+ that:
+ - output.max_instance_lifetime == 604801
+
+    - name: run without max_instance_lifetime
+      ec2_asg:
+        name: "{{ resource_prefix }}-asg"
+        launch_config_name: "{{ resource_prefix }}-lc"
+      register: output
+
+ - name: ensure max_instance_lifetime not affected by defaults
+ assert:
+ that:
+ - output.max_instance_lifetime == 604801
+
+ - name: disable asg max_instance_lifetime
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ max_instance_lifetime: 0
+ register: output
+
+ - name: ensure max_instance_lifetime is not set
+ assert:
+ that:
+ - not output.max_instance_lifetime
+
+ # ============================================================
+
+    # perform rolling replace with different launch configuration
+
+ - name: perform rolling update to new AMI
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc-2"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: "{{ load_balancer_name }}"
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ register: output
+
+ # ensure that all instances have new launch config
+ - assert:
+ that:
+ - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
+ loop: "{{ output.instance_facts | dict2items }}"
+
+ # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+ # perform rolling replace with the original launch configuration
+
+ - name: perform rolling update to new AMI while removing the load balancer
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ register: output
+
+ # ensure that all instances have new launch config
+ - assert:
+ that:
+ - "item.value.launch_config_name == '{{ resource_prefix }}-lc'"
+ loop: "{{ output.instance_facts | dict2items }}"
+
+ # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
+ # there should be the same number of instances as there were before the rolling update was performed
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+ # perform rolling replace with new launch configuration and lc_check:false
+
+    # Note - this is done async so we can query asg_facts during execution.
+    # Issues #28087 and #35993 still converge on the correct end state, but
+    # spin up extraneous instances while running.
+ - name: "perform rolling update to new AMI with lc_check: false"
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc-2"
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ replace_batch_size: 3
+ lc_check: false
+ wait_timeout: 1800
+ state: present
+ async: 1800
+ poll: 0
+ register: asg_job
+
+ - name: get ec2_asg info for 3 minutes
+ ec2_asg_info:
+ name: "{{ resource_prefix }}-asg"
+ register: output
+ loop_control:
+ pause: 15
+ loop: "{{ range(12) | list }}"
+
+    # Since we started with 3 servers and replaced all of them,
+    # we should see 6 servers total.
+ - assert:
+ that:
+ - output | community.general.json_query(inst_id_json_query) | unique | length == 6
+ vars:
+ inst_id_json_query: results[].results[].instances[].instance_id
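+      # The JMESPath query flattens every sample's instance list into one
+      # sequence of instance IDs before 'unique' de-duplicates them.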
+
+ - name: Ensure ec2_asg task completes
+ async_status: jid="{{ asg_job.ansible_job_id }}"
+ register: status
+ until: status is finished
+ retries: 200
+ delay: 15
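+      # This reclaims the fire-and-forget job started above, blocking until the
+      # rolling replace has actually finished before the next tasks run.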
+
+ # ============================================================
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ wait_timeout: 800
+ async: 400
+
+ # Create new asg with replace_all_instances and lc_check:false
+
+    # Note - this is done async so we can query asg_facts during execution.
+    # Issue #28087 still converges on the correct end state, but spins up
+    # extraneous instances while running.
+ - name: "new asg with lc_check: false"
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ replace_batch_size: 3
+ lc_check: false
+ wait_timeout: 1800
+ state: present
+ async: 1800
+ poll: 0
+ register: asg_job
+
+ # Collect ec2_asg_info for 3 minutes
+ - name: get ec2_asg information
+ ec2_asg_info:
+ name: "{{ resource_prefix }}-asg"
+ register: output
+ loop_control:
+ pause: 15
+ loop: "{{ range(12) | list }}"
+
+ # Get all instance_ids we saw and assert we saw number expected
+ # Should only see 3 (don't replace instances we just created)
+ - assert:
+ that:
+ - output | community.general.json_query(inst_id_json_query) | unique | length == 3
+ vars:
+ inst_id_json_query: results[].results[].instances[].instance_id
+
+ - name: Ensure ec2_asg task completes
+ async_status: jid="{{ asg_job.ansible_job_id }}"
+ register: status
+ until: status is finished
+ retries: 200
+ delay: 15
+
+ # we need a launch template, otherwise we cannot test the mixed instance policy
+ - name: create launch template for autoscaling group to test its mixed instance policy
+ ec2_launch_template:
+ template_name: "{{ resource_prefix }}-lt"
+ image_id: "{{ ec2_ami_image }}"
+ instance_type: t3.micro
+ credit_specification:
+ cpu_credits: standard
+ network_interfaces:
+ - associate_public_ip_address: yes
+ delete_on_termination: yes
+ device_index: 0
+ groups:
+ - "{{ sg.group_id }}"
+
+ - name: update autoscaling group with mixed-instance policy
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_template:
+ launch_template_name: "{{ resource_prefix }}-lt"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ state: present
+ mixed_instances_policy:
+ instance_types:
+ - t3.micro
+ - t3a.micro
+ wait_for_instances: yes
+ register: output
+
+ - assert:
+ that:
+ - "output.mixed_instances_policy | length == 2"
+ - "output.mixed_instances_policy[0] == 't3.micro'"
+ - "output.mixed_instances_policy[1] == 't3a.micro'"
+
+# ============================================================
+
+ always:
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ # Remove the testing dependencies
+
+ - name: remove the load balancer
+ ec2_elb_lb:
+ name: "{{ load_balancer_name }}"
+ state: absent
+ security_group_ids:
+ - "{{ sg.group_id }}"
+ subnets: "{{ testing_subnet.subnet.id }}"
+ wait: yes
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: "/"
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove launch configs
+ ec2_lc:
+ name: "{{ resource_prefix }}-lc"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ loop:
+ - "{{ resource_prefix }}-lc"
+ - "{{ resource_prefix }}-lc-2"
+
+ - name: delete launch template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-lt"
+ state: absent
+ register: del_lt
+ retries: 10
+ until: del_lt is not failed
+ ignore_errors: true
+
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet.subnet.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.55.77.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.55.77.0/24
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/vars/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_asg/vars/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/aliases
new file mode 100644
index 00000000..6ac56ac2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group2
+# https://github.com/ansible-collections/community.aws/issues/159
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/defaults/main.yml
new file mode 100644
index 00000000..8986714b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# VPCs are identified by the CIDR. Don't hard-code the CIDR: shippable will
+# run multiple copies of the test concurrently (Python 2.x and Python 3.x)
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.42.0/24'
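+# e.g. for one resource_prefix these might render as '10.137.0.0/16' and
+# '10.137.42.0/24' (the 137 is illustrative); seeding the random filter means
+# the same prefix always yields the same second octet, so reruns are stable.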
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/tasks/main.yml
new file mode 100644
index 00000000..6d904003
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_eip/tasks/main.yml
@@ -0,0 +1,727 @@
+- name: Integration testing for ec2_eip
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ ec2_eip:
+ in_vpc: true
+ collections:
+ - amazon.aws
+ block:
+ # =====================================================
+ - name: Get the current caller identity facts
+ aws_caller_info: null
+ register: caller_info
+ - name: list available AZs
+ aws_az_info: null
+ register: region_azs
+ - name: pick an AZ for testing
+ set_fact:
+ subnet_az: '{{ region_azs.availability_zones[0].zone_name }}'
+ - name: create a VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ AnsibleEIPTest: Pending
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ register: vpc_result
+ - name: create subnet
+ ec2_vpc_subnet:
+ cidr: '{{ subnet_cidr }}'
+ az: '{{ subnet_az }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ register: vpc_subnet_create
+ - ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw
+ - name: "Find AMI to use"
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
+ register: ec2_amis
+ - name: "create a security group"
+ ec2_group:
+ state: present
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: security_group
+ - name: Create instance for attaching
+ ec2_instance:
+ name: '{{ resource_prefix }}-instance'
+ image_id: '{{ ec2_amis.images[0].image_id }}'
+ security_group: '{{ security_group.group_id }}'
+ vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+      wait: no  # Don't delay the tests; we'll check again before we need it
+ register: create_ec2_instance_result
+
+ # =====================================================
+ - name: Look for signs of concurrent EIP tests. Pause if they are running or their prefix comes before ours.
+ vars:
+ running_query: vpcs[?tags.AnsibleEIPTest=='Running']
+ pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix
+ ec2_vpc_net_info:
+ filters:
+ tag:AnsibleEIPTest:
+ - Pending
+ - Running
+ register: vpc_info
+ retries: 120
+ delay: 5
+ until:
+ - ( vpc_info | community.general.json_query(running_query) | length == 0 )
+ - ( vpc_info | community.general.json_query(pending_query) | sort | first == resource_prefix )
+ - name: Make a crude lock
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ AnsibleEIPTest: Running
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
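+  # The two tasks above form a cooperative lock: each run tags its VPC 'Pending',
+  # waits until no VPC is 'Running' and its own prefix sorts first among the
+  # pending ones, then flips its own tag to 'Running'.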
+ # =====================================================
+ - name: Get current state of EIPs
+ ec2_eip_info: null
+ register: eip_info_start
+ - name: Require that there are no free IPs when we start, otherwise we can't test things properly
+ assert:
+ that:
+ - eip_info_start is defined
+ - '"addresses" in eip_info_start'
+ - ( eip_info_start.addresses | length ) == ( eip_info_start | community.general.json_query("addresses[].association_id") | length )
+ - name: Allocate a new eip (no conditions)
+ ec2_eip:
+ state: present
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ - assert:
+ that:
+ - '"addresses" in eip_info'
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - ec2_eip_info:
+ filters:
+ allocation-id: '{{ eip.allocation_id }}'
+ - assert:
+ that:
+ - '"addresses" in eip_info'
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: Allocate a new eip - attempt reusing unallocated ones (none available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Re-Allocate a new eip - attempt reusing unallocated ones (one available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: reallocate_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - reallocate_eip is defined
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr )
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - eip_release is defined
+ - eip_release is changed
+ - name: Allocate a new eip
+ ec2_eip:
+ state: present
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Match an existing eip (changed == false)
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ register: reallocate_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - reallocate_eip is defined
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr )
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: Allocate a new eip (no tags)
+ ec2_eip:
+ state: present
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: attempt reusing an existing eip with a tag (No match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: no_tagged_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - no_tagged_eip is defined
+ - no_tagged_eip is changed
+ - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.netcommon.ipaddr )
+ - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+ - name: tag eip so we can try matching it
+ ec2_tag:
+ state: present
+ resource: '{{ eip.allocation_id }}'
+ tags:
+ Team: Frontend
+ - name: attempt reusing an existing eip with a tag (Match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: reallocate_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - reallocate_eip is defined
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr )
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+  - name: attempt reusing an existing eip with a tag and its value (no match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: backend_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - backend_eip is defined
+ - backend_eip is changed
+ - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.netcommon.ipaddr )
+ - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
+ - name: tag eip so we can try matching it
+ ec2_tag:
+ state: present
+ resource: '{{ eip.allocation_id }}'
+ tags:
+ Team: Backend
+  - name: attempt reusing an existing eip with a tag and its value (match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: reallocate_eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - reallocate_eip is defined
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != ""
+ - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
+ - name: Release backend_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ backend_eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+ - name: Release no_tagged_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ no_tagged_eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: allocate a new eip from a pool
+ ec2_eip:
+ state: present
+ public_ipv4_pool: amazon
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: create ENI A
+ ec2_eni:
+ subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+ register: eni_create_a
+ - name: create ENI B
+ ec2_eni:
+ subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+ register: eni_create_b
+ - name: Attach EIP to ENI A
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is changed
+ - eip_info.addresses | length == 1
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr )
+ - eip_info.addresses[0].network_interface_owner_id == caller_info.account
+ - name: Re-Attach EIP to ENI A (no change)
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is not changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr )
+ - name: Attach EIP to ENI B (should fail, already associated)
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: associate_eip
+ ignore_errors: true
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is failed
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr )
+ - name: Attach EIP to ENI B
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ allow_reassociation: true
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr )
+ - name: Detach EIP from ENI B, without enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: disassociate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+      - disassociate_eip is defined
+      - disassociate_eip is changed
+ - eip_info.addresses | length == 1
+ - name: Re-detach EIP from ENI B, without enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is not changed
+ - eip_info.addresses | length == 1
+ - name: Attach EIP to ENI A
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - name: Detach EIP from ENI A, enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: disassociate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+      - disassociate_eip is defined
+      - disassociate_eip is changed
+ - eip_info.addresses | length == 0
+ - name: Re-detach EIP from ENI A, enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: associate_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - associate_eip is defined
+ - associate_eip is not changed
+ - eip_info.addresses | length == 0
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: Cleanup ENI B
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_b.interface.id }}'
+ - name: Cleanup ENI A
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_a.interface.id }}'
+ - name: Make sure the instance is ready
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-instance'
+ register: instance_info
+ until: instance_info.instances[0].state.name == 'running'
+ - name: Attach eip to an EC2 instance
+ ec2_eip:
+ device_id: '{{ instance_info.instances[0].instance_id }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+ - assert:
+ that:
+ - instance_eip is success
+ - eip_info.addresses[0].allocation_id is defined
+      - eip_info.addresses[0].instance_id == instance_info.instances[0].instance_id
+ # =====================================================
+ - name: Cleanup instance
+ ec2_instance:
+ instance_ids: '{{ create_ec2_instance_result.instance_ids }}'
+ state: absent
+ - name: Cleanup instance eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ instance_eip.public_ip }}'
+ register: eip_cleanup
+ retries: 5
+ delay: 5
+ until: eip_cleanup is successful
+ - name: Cleanup IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw
+ - name: Cleanup security group
+ ec2_group:
+ state: absent
+ name: '{{ resource_prefix }}-sg'
+ - name: Cleanup Subnet
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ subnet_cidr }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ ignore_errors: true
+ - name: allocate a new eip
+ ec2_eip:
+ state: present
+ register: eip
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip is defined
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is changed
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: Rerelease eip (no change)
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ - ec2_eip_info: null
+ register: eip_info
+ - assert:
+ that:
+ - eip_release is defined
+ - eip_release is not changed
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+ - name: Cleanup VPC
+ ec2_vpc_net:
+ state: absent
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: '{{ vpc_cidr }}'
+ # =====================================================
+ always:
+ - name: Cleanup instance (by id)
+ ec2_instance:
+ instance_ids: '{{ create_ec2_instance_result.instance_ids }}'
+ state: absent
+ wait: true
+ ignore_errors: true
+ - name: Cleanup instance (by name)
+ ec2_instance:
+ name: '{{ resource_prefix }}-instance'
+ state: absent
+ wait: true
+ ignore_errors: true
+ - name: Cleanup ENI A
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_a.interface.id }}'
+ ignore_errors: true
+ - name: Cleanup ENI B
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_b.interface.id }}'
+ ignore_errors: true
+  - name: Cleanup instance eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ instance_eip.public_ip }}'
+    register: eip_cleanup
+    retries: 5
+    delay: 5
+    until: eip_cleanup is successful
+    ignore_errors: true
+ - name: Cleanup IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw
+ ignore_errors: true
+ - name: Cleanup security group
+ ec2_group:
+ state: absent
+ name: '{{ resource_prefix }}-sg'
+ ignore_errors: true
+ - name: Cleanup Subnet
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ subnet_cidr }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: true
+ - name: Cleanup eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ when: eip is changed
+ ignore_errors: true
+ - name: Cleanup reallocate_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ reallocate_eip.public_ip }}'
+ when: reallocate_eip is changed
+ ignore_errors: true
+ - name: Cleanup backend_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ backend_eip.public_ip }}'
+ when: backend_eip is changed
+ ignore_errors: true
+ - name: Cleanup no_tagged_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ no_tagged_eip.public_ip }}'
+ when: no_tagged_eip is changed
+ ignore_errors: true
+ - name: Cleanup VPC
+ ec2_vpc_net:
+ state: absent
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: '{{ vpc_cidr }}'
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/aliases
new file mode 100644
index 00000000..62cb1d2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group3
+ec2_instance_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/inventory b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/inventory
new file mode 100644
index 00000000..09bae76b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/inventory
@@ -0,0 +1,18 @@
+[tests]
+# Sorted fastest to slowest
+version_fail_wrapper
+ebs_optimized
+block_devices
+cpu_options
+default_vpc_tests
+external_resource_attach
+instance_no_wait
+iam_instance_role
+termination_protection
+tags_and_vpc_settings
+checkmode_tests
+security_group
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/main.yml
new file mode 100644
index 00000000..7695f7bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/main.yml
@@ -0,0 +1,43 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
+
+
+# Prepare the VPC and figure out which AMI to use
+- hosts: all
+ gather_facts: no
+ tasks:
+ - module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ vars:
+ # We can't just use "run_once" because the facts don't propagate when
+ # running an 'include' that was run_once
+ setup_run_once: yes
+ block:
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: find_ami.yml
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: env_setup.yml
+ rescue:
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: env_cleanup.yml
+ run_once: yes
+ - fail:
+ msg: 'Environment preparation failed'
+ run_once: yes
+
+# VPC should get cleaned up once all hosts have run
+- hosts: all
+ gather_facts: no
+ strategy: free
+ #serial: 10
+ roles:
+ - ec2_instance
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/meta/main.yml
new file mode 100644
index 00000000..38b31be0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml
new file mode 100644
index 00000000..8e70ab69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# defaults file for ec2_instance
+ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ inventory_hostname }}'
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.'
+subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24'
+subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.'
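+# All of these seeds derive from resource_prefix, so every parallel test host
+# computes identical VPC and subnet CIDRs and can share the environment that
+# env_setup.yml creates once.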
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json
new file mode 100644
index 00000000..72413abd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml
new file mode 100644
index 00000000..77589cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+collections:
+ - amazon.aws
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml
new file mode 100644
index 00000000..0a8ab63f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml
@@ -0,0 +1,82 @@
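+# Launch an instance with a second EBS mapping on /dev/sdb, then confirm via
+# ec2_instance_info that both the root device and the extra volume show up in
+# block_device_mappings.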
+- block:
+ - name: "New instance with an extra block device"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-vols"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: true
+ register: block_device_instances
+
+ - name: "Gather instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ register: block_device_instances_info
+
+ - assert:
+ that:
+ - block_device_instances is not failed
+ - block_device_instances is changed
+ - block_device_instances_info.instances[0].block_device_mappings[0]
+ - block_device_instances_info.instances[0].block_device_mappings[1]
+ - block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb'
+
+ - name: "New instance with an extra block device (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ "instance-state-name": "running"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ block_device_instances.instance_ids }}"
+
+ always:
+ - name: "Terminate block_devices instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
new file mode 100644
index 00000000..e13ad440
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
@@ -0,0 +1,201 @@
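+# Pattern used throughout this file: perform each state change for real on
+# one instance and under check_mode against a '-checkmode' twin, then assert
+# via ec2_instance_info that only the real instance was touched.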
+- block:
+ - name: "Make basic instance"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ wait: false
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ register: basic_instance
+
+ - name: "Make basic instance (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Stop instance (check mode)"
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_stopinstance_fact
+
+ - name: "Verify that it was not stopped."
+ assert:
+ that:
+ - '"{{ confirm_checkmode_stopinstance_fact.instances[0].state.name }}" != "stopped"'
+
+ - name: "Stop instance."
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ register: instance_stop
+ until: not instance_stop.failed
+ retries: 10
+
+ - name: "fact stopped ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_stopinstance_fact
+
+ - name: "Verify that it was stopped."
+ assert:
+ that:
+ - '"{{ confirm_stopinstance_fact.instances[0].state.name }}" in ["stopped", "stopping"]'
+
+ - name: "Running instance in check mode."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_runninginstance_fact
+
+ - name: "Verify that it was not running."
+ assert:
+ that:
+ - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"'
+
+ - name: "Running instance."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+
+ - name: "fact ec2 instance."
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_runninginstance_fact
+
+ - name: "Verify that it was running."
+ assert:
+ that:
+ - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"'
+
+ - name: "Tag instance."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Other Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance."
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_not_tagged
+
+ - name: "Verify that it hasn't been re-tagged."
+ assert:
+ that:
+ - '"{{ confirm_not_tagged.instances[0].tags.TestTag }}" == "Some Value"'
+
+ - name: "Terminate instance in check mode."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_terminatedinstance_fact
+
+ - name: "Verify that it was not terminated,"
+ assert:
+ that:
+ - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"'
+
+ - name: "Terminate instance."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_terminatedinstance_fact
+
+ - name: "Verify that it was terminated,"
+ assert:
+ that:
+ - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"'
+
+ always:
+ - name: "Terminate checkmode instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
new file mode 100644
index 00000000..947011f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
@@ -0,0 +1,86 @@
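+# cpu_options can only be set at launch time, so the modification attempt
+# below is expected to be a no-op on the already-running instance.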
+- block:
+ - name: "create t3.nano instance with cpu_options"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ wait: false
+ register: instance_creation
+
+ - name: "instance with cpu_options created with the right options"
+ assert:
+ that:
+ - instance_creation is success
+ - instance_creation is changed
+
+ - name: "modify cpu_options on existing instance (warning displayed)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 2
+ wait: false
+ register: cpu_options_update
+ ignore_errors: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ register: presented_instance_fact
+
+ - name: "modify cpu_options has no effect on existing instance"
+ assert:
+ that:
+ - cpu_options_update is success
+ - cpu_options_update is not changed
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
+ - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1"
+ - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1"
+
+ - name: "create t3.nano instance with cpu_options(check mode)"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ check_mode: yes
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ always:
+ - name: "Terminate cpu_options instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
new file mode 100644
index 00000000..a69dfe9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
@@ -0,0 +1,57 @@
+- block:
+ - name: "Make instance in a default subnet of the VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_default_vpc
+
+ - name: "Make instance in a default subnet of the VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ in_default_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate vpc_tests instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
new file mode 100644
index 00000000..5bfdc086
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
@@ -0,0 +1,41 @@
+- block:
+ - name: "Make EBS optimized instance in the testing subnet of the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ ebs_optimized: true
+ instance_type: t3.nano
+ wait: false
+ register: ebs_opt_in_vpc
+
+ - name: "Get ec2 instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ register: ebs_opt_instance_info
+
+ - name: "Assert instance is ebs_optimized"
+ assert:
+ that:
+ - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ ebs_opt_in_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate ebs_optimzed instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
new file mode 100644
index 00000000..07c7f72b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
@@ -0,0 +1,104 @@
+- name: "remove Instances"
+ ec2_instance:
+ state: absent
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ wait: yes
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove ENIs"
+ ec2_eni_info:
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ register: enis
+
+- name: "delete all ENIs"
+ ec2_eni:
+ state: absent
+ eni_id: "{{ item.id }}"
+ until: removed is not failed
+ with_items: "{{ enis.network_interfaces }}"
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove the security group"
+ ec2_group:
+ state: absent
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove the second security group"
+ ec2_group:
+ name: "{{ resource_prefix }}-sg-2"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove routing rules"
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove internet gateway"
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove subnet A"
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_a_cidr }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove subnet B"
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_b_cidr }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove the VPC"
+ ec2_vpc_net:
+ state: absent
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml
new file mode 100644
index 00000000..7c99f807
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml
@@ -0,0 +1,95 @@
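+# Shared scaffolding for every host in the play: one VPC with an internet
+# gateway, a public route table, a subnet in each of two AZs, and two
+# security groups (registered as sg and sg2 for the per-host task files).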
+- run_once: '{{ setup_run_once | default("no") | bool }}'
+ block:
+ - name: "fetch AZ availability"
+ aws_az_info:
+ register: az_info
+ - name: "Assert that we have multiple AZs available to us"
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: "pick AZs"
+ set_fact:
+ subnet_a_az: '{{ az_info.availability_zones[0].zone_name }}'
+ subnet_b_az: '{{ az_info.availability_zones[1].zone_name }}'
+
+ - name: "Create VPC for use in testing"
+ ec2_vpc_net:
+ state: present
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+ - name: "Create internet gateway for use in testing"
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: igw
+
+ - name: "Create default subnet in zone A"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_a_cidr }}"
+ az: "{{ subnet_a_az }}"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ register: testing_subnet_a
+
+ - name: "Create secondary subnet in zone B"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_b_cidr }}"
+ az: "{{ subnet_b_az }}"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ register: testing_subnet_b
+
+ - name: "create routing rules"
+ ec2_vpc_route_table:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+
+ - name: "create a security group with the vpc"
+ ec2_group:
+ state: present
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
+
+ - name: "create secondary security group with the vpc"
+ ec2_group:
+ name: "{{ resource_prefix }}-sg-2"
+ description: a secondary security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
new file mode 100644
index 00000000..2625977f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
@@ -0,0 +1,129 @@
+- block:
+ # Make custom ENIs and attach via the `network` parameter
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_a
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_b
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_c
+
+ - ec2_key:
+ name: "{{ resource_prefix }}_test_key"
+
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
+ register: in_test_vpc_instance
+
+ - assert:
+ that:
+ - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"'
+ - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
+
+ - name: "Add a second interface"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ - id: "{{ eni_b.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: add_interface
+ until: add_interface is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_c.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ always:
+ - name: "Terminate external_resource_attach instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
+
+ - ec2_key:
+ state: absent
+ name: "{{ resource_prefix }}_test_key"
+ ignore_errors: yes
+
+ - ec2_eni:
+ state: absent
+ eni_id: '{{ item.interface.id }}'
+ ignore_errors: yes
+ with_items:
+ - '{{ eni_a }}'
+ - '{{ eni_b }}'
+ - '{{ eni_c }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml
new file mode 100644
index 00000000..5c0e61f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml
@@ -0,0 +1,15 @@
+- run_once: '{{ setup_run_once | default("no") | bool }}'
+ block:
+ - name: "Find AMI to use"
+ run_once: yes
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - name: "Set fact with latest AMI"
+ run_once: yes
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
new file mode 100644
index 00000000..6e29b746
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
@@ -0,0 +1,127 @@
+- block:
+ - name: "Create IAM role for test"
+ iam_role:
+ state: present
+ name: "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role
+
+ - name: "Create second IAM role for test"
+ iam_role:
+ state: present
+ name: "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role_2
+
+ - name: "wait 10 seconds for roles to become available"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "Make instance with an instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_role
+
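+  # iam_role returns the role ARN, while the instance reports the attached
+  # instance profile, so rewrite ':role/' to ':instance-profile/' to compare.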
+ - assert:
+ that:
+ - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+
+ - name: "Make instance with an instance_role(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Update instance with new instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_updated_role
+
+ - name: "wait 10 seconds for role update to complete"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: updates_instance_info
+
+ - assert:
+ that:
+ - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id'
+
+ always:
+ - name: "Terminate iam_instance_role instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: "Delete IAM role for test"
+ iam_role:
+ state: absent
+ name: "{{ item }}"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ loop:
+ - "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ - "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
new file mode 100644
index 00000000..418d7ef3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
@@ -0,0 +1,68 @@
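+# With wait: false the module returns as soon as the API accepts the launch:
+# instance_ids are available immediately, but the full 'instances' payload is
+# not collected (the asserts below rely on exactly that).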
+- block:
+ - name: "New instance and don't wait for it to complete"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ register: in_test_vpc
+
+ - assert:
+ that:
+ - in_test_vpc is not failed
+ - in_test_vpc is changed
+ - in_test_vpc.instances is not defined
+ - in_test_vpc.instance_ids is defined
+ - in_test_vpc.instance_ids | length > 0
+
+ - name: "New instance and don't wait for it to complete ( check mode )"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Facts for ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait"
+ register: real_instance_fact
+ until: real_instance_fact.instances | length > 0
+ retries: 10
+
+ - name: "Facts for checkmode ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ real_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ in_test_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate instance_no_wait instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml
new file mode 100644
index 00000000..dc81199a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
+#
+# Please make sure you tag your instances with
+# tags:
+# "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+# And delete them based off that tag at the end of your specific set of tests
+#
+# ###############################################################################
+#
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
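+# For example, either of these should be honoured for the region (the arrows
+# above give the assumed lookup order):
+#   export EC2_REGION=us-east-1
+#   export AWS_REGION=us-east-1
+#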
+
+- name: "Wrap up all tests and setup AWS credentials"
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ aws_config:
+ retries:
+ # Unfortunately AWSRetry doesn't support paginators and boto3's paginators
+ # don't support any configuration of the delay between retries.
+ max_attempts: 20
+ collections:
+ - amazon.aws
+ block:
+ - debug:
+ msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+ - include_tasks: '{{ inventory_hostname }}.yml'
+ - debug:
+ msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
+
+ always:
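+  # Shared teardown must wait for the slowest host: count the hosts that set
+  # _role_complete and only run env_cleanup once that matches the play size.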
+ - set_fact:
+ _role_complete: True
+ - vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ debug:
+ msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete"
+ - include_tasks: env_cleanup.yml
+ vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ when:
+ - aws_cleanup
+ - completed_hosts == hosts_in_play
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/security_group.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/security_group.yml
new file mode 100644
index 00000000..c0e52a5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/security_group.yml
@@ -0,0 +1,81 @@
+- block:
+ - name: "New instance with 2 security groups"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ wait: false
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: security_groups_test
+
+ - name: "Recreate same instance with 2 security groups ( Idempotency )"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ wait: false
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: security_groups_test_idempotency
+
+ - name: "Gather ec2 facts to check SGs have been added"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-security-groups"
+ "instance-state-name": "running"
+ register: dual_sg_instance_facts
+ until: dual_sg_instance_facts.instances | length > 0
+ retries: 10
+
+ - name: "Remove secondary security group from instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: remove_secondary_security_group
+
+ - name: "Gather ec2 facts to check seconday SG has been removed"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-security-groups"
+ "instance-state-name": "running"
+ register: single_sg_instance_facts
+ until: single_sg_instance_facts.instances | length > 0
+ retries: 10
+
+ - name: "Add secondary security group to instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: add_secondary_security_group
+
+ - assert:
+ that:
+ - security_groups_test is not failed
+ - security_groups_test is changed
+ - security_groups_test_idempotency is not changed
+ - remove_secondary_security_group is changed
+ - single_sg_instance_facts.instances.0.security_groups | length == 1
+ - dual_sg_instance_facts.instances.0.security_groups | length == 2
+ - add_secondary_security_group is changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
new file mode 100644
index 00000000..d38b53f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
@@ -0,0 +1,158 @@
+- block:
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Try to re-make the instance, hopefully this shows changed=False"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: remake_in_test_vpc
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that: not remake_in_test_vpc.changed
+ - name: "check that instance IDs match anyway"
+ assert:
+ that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
+ - name: "check that source_dest_check was set to false"
+ assert:
+ that: 'not remake_in_test_vpc.instances[0].source_dest_check'
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Alter it by adding tags"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: add_another_tag
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Another == 'thing'
+ - check_tags.instances[0].tags.Something == 'else'
+
+ - name: "Purge a tag"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ purge_tags: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - "'Something' not in check_tags.instances[0].tags"
+
+ - name: "check that subnet-default public IP rule was followed"
+ assert:
+ that:
+ - check_tags.instances[0].public_dns_name == ""
+ - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith)
+ - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id
+ - name: "check that tags were applied"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Name.startswith(resource_prefix)
+ - "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']"
+
+ - name: "Terminate instance"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ register: result
+ - assert:
+ that: result.changed
+
+ always:
+ - name: "Terminate tags_and_vpc_settings instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
new file mode 100644
index 00000000..418e3c39
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
@@ -0,0 +1,184 @@
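+# termination_protection maps to the EC2 DisableApiTermination attribute: a
+# protected instance cannot be terminated until the flag is cleared again,
+# which is what the tasks below exercise end to end.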
+- block:
+
+ - name: Create instance with termination protection (check mode)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - "{{ create_instance_check_mode_results.changed }}"
+ - "{{ create_instance_check_mode_results.spec.DisableApiTermination }}"
+
+ - name: Create instance with termination protection
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ create_instance_results.instances | length }} > 0"
+ - "'{{ create_instance_results.instances.0.state.name }}' == 'running'"
+ - "'{{ create_instance_results.spec.DisableApiTermination }}'"
+
+ - name: Create instance with termination protection (check mode) (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - "{{ not create_instance_check_mode_results.changed }}"
+
+ - name: Create instance with termination protection (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ not create_instance_results.changed }}"
+ - "{{ create_instance_results.instances | length }} > 0"
+
+ - name: Try to terminate the instance (expected to fail)
+ ec2_instance:
+ filters:
+ tag:Name: "{{ resource_prefix }}-termination-protection"
+ state: absent
+ failed_when: "'Unable to terminate instances' not in terminate_instance_results.msg"
+ register: terminate_instance_results
+
+ # https://github.com/ansible/ansible/issues/67716
+  # Updating termination protection in check mode has a known bug (see the issue linked above)
+
+ - name: Set termination protection to false
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ set_termination_protection_results.changed }}"
+ - "{{ not set_termination_protection_results.changes[0].DisableApiTermination.Value }}"
+
+ - name: Set termination protection to false (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ not set_termination_protection_results.changed }}"
+
+ - name: Set termination protection to true
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ set_termination_protection_results.changed }}"
+ - "{{ set_termination_protection_results.changes[0].DisableApiTermination.Value }}"
+
+ - name: Set termination protection to true (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ not set_termination_protection_results.changed }}"
+
+ - name: Set termination protection to false (so we can terminate instance)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_image }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Terminate the instance
+ ec2_instance:
+ filters:
+ tag:TestId: "{{ resource_prefix }}"
+ state: absent
+
+ always:
+
+ - name: Set termination protection to false (so we can terminate instance) (cleanup)
+ ec2_instance:
+ filters:
+ tag:TestId: "{{ resource_prefix }}"
+ termination_protection: false
+ ignore_errors: yes
+
+ - name: Terminate instance
+ ec2_instance:
+ filters:
+ tag:TestId: "{{ resource_prefix }}"
+ state: absent
+ wait: false
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml
new file mode 100644
index 00000000..67370ebe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml
@@ -0,0 +1,29 @@
+- block:
+ - name: "create t3.nano with cpu options (fails gracefully)"
+ ec2_instance:
+ state: present
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-ec2"
+ image_id: "{{ ec2_ami_image }}"
+ instance_type: "t3.nano"
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: ec2_instance_cpu_options_creation
+ ignore_errors: yes
+
+ - name: "check that graceful error message is returned when creation with cpu_options and old botocore"
+ assert:
+ that:
+ - ec2_instance_cpu_options_creation.failed
+ - 'ec2_instance_cpu_options_creation.msg == "cpu_options is only supported with botocore >= 1.10.16"'
+
+ always:
+ - name: "Terminate version_fail instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
new file mode 100644
index 00000000..4513ae71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
@@ -0,0 +1,30 @@
+---
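+# Build a throwaway virtualenv pinned to botocore<1.10.16, then re-run the
+# cpu_options test inside it to check the module fails with a clear message
+# rather than a traceback on old SDKs.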
+- include_role:
+ name: 'setup_remote_tmp_dir'
+
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: "virtualenv"
+
+- pip:
+ name:
+ - 'botocore<1.10.16'
+ - boto3
+ - coverage<5
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: version_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ state: absent
+ path: "{{ virtualenv }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/runme.sh
new file mode 100755
index 00000000..aa324772
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_instance/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/full_test.yml
new file mode 100644
index 00000000..ae375ac1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/full_test.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ roles:
+ - ec2_launch_template
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml
new file mode 100644
index 00000000..9651b916
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+resource_prefix: ansible-test-default-group
+ec2_ami_image:
+ # https://wiki.centos.org/Cloud/AWS collected 2018-01-10
+ ap-northeast-1: ami-571e3c30
+ ap-northeast-2: ami-97cb19f9
+ ap-south-1: ami-11f0837e
+ ap-southeast-1: ami-30318f53
+ ap-southeast-2: ami-24959b47
+ ca-central-1: ami-daeb57be
+ eu-central-1: ami-7cbc6e13
+ eu-west-1: ami-0d063c6b
+ eu-west-2: ami-c22236a6
+ sa-east-1: ami-864f2dea
+ us-east-1: ami-ae7bfdb8
+ us-east-2: ami-9cbf9bf9
+ us-west-1: ami-7c280d1c
+ us-west-2: ami-0c2aba6c
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json
new file mode 100644
index 00000000..72413abd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml
new file mode 100644
index 00000000..8d610a2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml
@@ -0,0 +1,38 @@
+- block:
+ - name: delete a non-existent template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-not-a-real-template"
+ state: absent
+ register: del_fake_lt
+ ignore_errors: true
+ - assert:
+ that:
+ - del_fake_lt is not failed
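+ # cpu_options asks EC2 for fewer vCPUs than the instance type's default;
+ # for c4.large, core_count 1 with threads_per_core 1 should yield a single
+ # vCPU (effectively disabling hyperthreading).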
+ - name: create c4.large launch template with cpu_options
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-c4large-1-threads-per-core"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: c4.large
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ register: lt
+
+ - name: launch template created with the requested cpu_options
+ assert:
+ that:
+ - lt is success
+ - lt is changed
+ - "lt.latest_template.launch_template_data.cpu_options.core_count == 1"
+ - "lt.latest_template.launch_template_data.cpu_options.threads_per_core == 1"
+ always:
+ - name: delete the template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-c4large-1-threads-per-core"
+ state: absent
+ register: del_lt
+ retries: 10
+ until: del_lt is not failed
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml
new file mode 100644
index 00000000..5e9b7f56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml
@@ -0,0 +1,104 @@
+- block:
+ - name: Create IAM role for test
+ iam_role:
+ name: "{{ resource_prefix }}-test-policy"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ state: present
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonS3ReadOnlyAccess
+ register: iam_role
+
+ - name: Create second IAM role for test
+ iam_role:
+ name: "{{ resource_prefix }}-test-policy-2"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ state: present
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonS3ReadOnlyAccess
+ register: iam_role_2
+
+ - name: Make launch template with an instance_role
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ instance_type: t2.micro
+ iam_instance_profile: "{{ resource_prefix }}-test-policy"
+ register: template_with_role
+
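+ # iam_role returns the role ARN; the ARN of the matching instance profile
+ # differs only in its ":role/" vs ":instance-profile/" path segment, hence
+ # the replace() below.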
+ - assert:
+ that:
+ - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+
+ - name: Create template again, with no change to instance_role
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ instance_type: t2.micro
+ iam_instance_profile: "{{ resource_prefix }}-test-policy"
+ register: template_with_role
+
+ - assert:
+ that:
+ - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_role is not changed'
+
+ - name: Update launch template with new instance_role
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ instance_type: t2.micro
+ iam_instance_profile: "{{ resource_prefix }}-test-policy-2"
+ register: template_with_updated_role
+
+ - assert:
+ that:
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_role.default_template.version_number < template_with_updated_role.default_template.version_number'
+ - 'template_with_updated_role is changed'
+ - 'template_with_updated_role is not failed'
+
+ - name: Re-set with same new instance_role
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ instance_type: t2.micro
+ iam_instance_profile: "{{ resource_prefix }}-test-policy-2"
+ register: template_with_updated_role
+
+ - assert:
+ that:
+ - 'template_with_updated_role is not changed'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+
+ always:
+ - name: delete launch template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-test-instance-role"
+ state: absent
+ register: lt_removed
+ until: lt_removed is not failed
+ ignore_errors: yes
+ retries: 10
+ - name: Delete IAM role for test
+ iam_role:
+ name: "{{ resource_prefix }}-test-policy"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ state: absent
+ create_instance_profile: yes
+ register: iam_removed
+ until: iam_removed is not failed
+ ignore_errors: yes
+ retries: 10
+ - name: Delete second IAM role for test
+ iam_role:
+ name: "{{ resource_prefix }}-test-policy-2"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ state: absent
+ create_instance_profile: yes
+ register: iam_2_removed
+ until: iam_2_removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml
new file mode 100644
index 00000000..4976da27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
+
+# - include: ../../../../../setup_ec2/tasks/common.yml module_name: ec2_instance
+
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - include_tasks: cpu_options.yml
+ - include_tasks: iam_instance_role.yml
+ - include_tasks: versions.yml
+
+ always:
+ - debug:
+ msg: teardown goes here
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml
new file mode 100644
index 00000000..7da7f770
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml
@@ -0,0 +1,208 @@
+- block:
+ # ============================================================
+ # set up VPC
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.99.0.0/16
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+ - name: Create default subnet in zone A
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.99.0.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ register: testing_subnet_a
+
+ - name: Create secondary subnet in zone B
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.99.1.0/24
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ register: testing_subnet_b
+
+ - name: create a security group with the vpc
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ ports: [22, 80]
+ cidr_ip: 0.0.0.0/0
+ register: sg
+ # TODO: convert these tests from ec2_instance to ec2_launch_template; the
+ # assert below fails on purpose so this file stays disabled until then
+ - assert:
+ that:
+ - 1 == 0
+ # ============================================================
+ # start subnet/sg testing
+ - name: Make instance in the testing subnet created in the test VPC
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ resource_prefix }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: t2.micro
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ register: in_test_vpc
+
+ - name: Try to re-make the instance (expect changed=false)
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ resource_prefix }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: t2.micro
+ register: remake_in_test_vpc
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that: not remake_in_test_vpc.changed
+ - name: check that instance IDs match anyway
+ assert:
+ that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
+ - name: check that source_dest_check was set to false
+ assert:
+ that: 'not remake_in_test_vpc.instances[0].source_dest_check'
+
+ - name: Alter it by adding tags
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: t2.micro
+ register: add_another_tag
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Another == 'thing'
+ - check_tags.instances[0].tags.Something == 'else'
+
+ - name: Purge a tag
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ purge_tags: true
+ tags:
+ TestId: "{{ resource_prefix }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: t2.micro
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - "'Something' not in check_tags.instances[0].tags"
+
+ - name: Terminate instance
+ ec2_instance:
+ filters:
+ tag:TestId: "{{ resource_prefix }}"
+ state: absent
+ register: result
+ - assert:
+ that: result.changed
+
+ - name: Terminate instance
+ ec2_instance:
+ instance_ids: "{{ in_test_vpc.instance_ids }}"
+ state: absent
+ register: result
+ - assert:
+ that: not result.changed
+
+ - name: check that subnet-default public IP rule was followed
+ assert:
+ that:
+ - in_test_vpc.instances[0].public_dns_name == ""
+ - in_test_vpc.instances[0].private_ip_address.startswith("10.99.1.")
+ - in_test_vpc.instances[0].subnet_id == testing_subnet_b.subnet.id
+ - name: check that tags were applied
+ assert:
+ that:
+ - in_test_vpc.instances[0].tags.Name.startswith(resource_prefix)
+ - in_test_vpc.instances[0].state.name == 'running'
+
+ always:
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove subnet A
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.99.0.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove subnet B
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.99.1.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.99.0.0/16
+ state: absent
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml
new file mode 100644
index 00000000..9035467a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml
@@ -0,0 +1,62 @@
+- block:
+ - name: create simple instance template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-simple"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: c4.large
+ register: lt
+
+ - name: simple template created with version 1 as both default and latest
+ assert:
+ that:
+ - lt is success
+ - lt is changed
+ - lt.default_version == 1
+ - lt.latest_version == 1
+
+ - name: update simple instance template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-simple"
+ default_version: 1
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: m5.large
+ register: lt
+
+ - name: update added version 2 as latest while default stays pinned to 1
+ assert:
+ that:
+ - lt is success
+ - lt is changed
+ - lt.default_version == 1
+ - lt.latest_version == 2
+
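+ # With default_version omitted the module promotes the newly created
+ # version to default, so default and latest should both advance to 3.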
+ - name: update simple instance template again (default_version unset)
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-simple"
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t3.medium
+ register: lt
+
+ - name: second update made the new version both default and latest
+ assert:
+ that:
+ - lt is success
+ - lt is changed
+ - lt.default_version == 3
+ - lt.latest_version == 3
+
+ always:
+ - name: delete the template
+ ec2_launch_template:
+ name: "{{ resource_prefix }}-simple"
+ state: absent
+ register: del_lt
+ retries: 10
+ until: del_lt is not failed
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/version_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/version_fail.yml
new file mode 100644
index 00000000..02b87f4a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/playbooks/version_fail.yml
@@ -0,0 +1,36 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ vars:
+ resource_prefix: 'ansible-testing'
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ tasks:
+ - block:
+ - name: Include vars file from roles/ec2_launch_template/defaults/main.yml
+ include_vars:
+ file: 'roles/ec2_launch_template/defaults/main.yml'
+
+ - name: create c4.large template (failure expected)
+ ec2_launch_template:
+ state: present
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tpl"
+ instance_type: c4.large
+ register: ec2_lt
+ ignore_errors: yes
+
+ - name: check that a graceful error message is returned when creating a template with an old boto3
+ assert:
+ that:
+ - ec2_lt is failed
+ - 'ec2_lt.msg == "ec2_launch_template requires boto3 >= 1.6.0"'
+ always:
+ - name: delete the c4.large template just in case it was created
+ ec2_launch_template:
+ state: absent
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tpl"
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/runme.sh
new file mode 100755
index 00000000..62479044
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_launch_template/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+# Test graceful failure for older versions of botocore
+source virtualenv.sh
+pip install 'boto3<1.6.0'
+ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
+
+# Run full test suite
+source virtualenv.sh
+pip install 'boto3>=1.6.0'
+ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/defaults/main.yml
new file mode 100644
index 00000000..4d80b5d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for ec2_instance
+ec2_instance_name: '{{ resource_prefix }}-node'
+ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
+ec2_ami_name: "amzn-ami-hvm*"
+alarm_prefix: "ansible-test"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml
new file mode 100644
index 00000000..e90ddc64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml
@@ -0,0 +1,94 @@
+- name: remove any instances in the test VPC
+ ec2_instance:
+ filters:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: find ENIs left in the test VPC
+ ec2_eni_info:
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ register: enis
+
+- name: delete all ENIs
+ ec2_eni:
+ eni_id: "{{ item.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ with_items: "{{ enis.network_interfaces }}"
+ ignore_errors: yes
+ retries: 10
+
+- name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove subnet A
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove subnet B
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.33.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ state: absent
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_setup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_setup.yml
new file mode 100644
index 00000000..80b49dbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/env_setup.yml
@@ -0,0 +1,62 @@
+- name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+- name: Create internet gateway for use in testing
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: present
+ register: igw
+
+- name: Create default subnet in zone A
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ register: testing_subnet_a
+
+- name: Create secondary subnet in zone B
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.33.0/24
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ register: testing_subnet_b
+
+- name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+
+- name: create a security group with the vpc
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/main.yml
new file mode 100644
index 00000000..102747f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/tasks/main.yml
@@ -0,0 +1,232 @@
+- name: run ec2_metric_alarm tests
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
+ block:
+ - set_fact:
+ alarm_full_name: "{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low"
+
+ # until there's a module to get info about alarms, awscli is needed
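+ # describe-alarms prints JSON of the form {"MetricAlarms": [{...}]}; its
+ # stdout is parsed with the from_json filter below and asserted against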
+ - name: install awscli
+ pip:
+ state: present
+ name: awscli
+
+ - name: set up environment for testing.
+ include_tasks: env_setup.yml
+
+ - name: get info on alarms
+ command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: alarm_info_query
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - set_fact:
+ ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
+
+ - name: Make instance in a default subnet of the VPC
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-default-vpc"
+ image_id: "{{ec2_ami_image }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t2.micro
+ wait: true
+ register: ec2_instance_results
+
+ - name: create ec2 metric alarm on ec2 instance
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
+ state: present
+ name: "{{ alarm_full_name }}"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ treat_missing_data: missing
+ statistic: Average
+ comparison: "<="
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: "Percent"
+ description: "This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes "
+ register: ec2_instance_metric_alarm
+
+ - name: get info on alarms
+ command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: alarm_info_query
+
+ - name: convert it to an object
+ set_fact:
+ alarm_info: "{{ alarm_info_query.stdout |from_json }}"
+
+ - name: "verify that an alarm was created"
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm.changed'
+ - 'ec2_instance_metric_alarm.alarm_arn'
+ - 'ec2_instance_metric_alarm.statistic == alarm_info["MetricAlarms"][0].Statistic'
+ - 'ec2_instance_metric_alarm.name == alarm_info["MetricAlarms"][0].AlarmName'
+ - 'ec2_instance_metric_alarm.metric== alarm_info["MetricAlarms"][0].MetricName'
+ - 'ec2_instance_metric_alarm.namespace == alarm_info["MetricAlarms"][0].Namespace'
+ - 'ec2_instance_metric_alarm.comparison == alarm_info["MetricAlarms"][0].ComparisonOperator'
+ - 'ec2_instance_metric_alarm.threshold == alarm_info["MetricAlarms"][0].Threshold'
+ - 'ec2_instance_metric_alarm.period == alarm_info["MetricAlarms"][0].Period'
+ - 'ec2_instance_metric_alarm.unit == alarm_info["MetricAlarms"][0].Unit'
+ - 'ec2_instance_metric_alarm.evaluation_periods == alarm_info["MetricAlarms"][0].EvaluationPeriods'
+ - 'ec2_instance_metric_alarm.description == alarm_info["MetricAlarms"][0].AlarmDescription'
+ - 'ec2_instance_metric_alarm.treat_missing_data == alarm_info["MetricAlarms"][0].TreatMissingData'
+
+ - name: create ec2 metric alarm on ec2 instance (idempotent)
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
+ state: present
+ name: "{{ alarm_full_name }}"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ treat_missing_data: missing
+ statistic: Average
+ comparison: "<="
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: "Percent"
+ description: "This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes "
+ register: ec2_instance_metric_alarm_idempotent
+
+ - name: get info on alarms
+ command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: alarm_info_query_idempotent
+
+ - name: convert it to an object
+ set_fact:
+ alarm_info_idempotent: "{{ alarm_info_query_idempotent.stdout |from_json }}"
+
+ - name: "Verify alarm does not register as changed after update"
+ assert:
+ that:
+ - not ec2_instance_metric_alarm_idempotent.changed
+
+ - name: "Verify alarm did not change after updating"
+ assert:
+ that:
+ - "alarm_info['MetricAlarms'][0]['{{item}}'] == alarm_info_idempotent['MetricAlarms'][0]['{{ item }}']"
+ with_items:
+ - AlarmArn
+ - Statistic
+ - AlarmName
+ - MetricName
+ - Namespace
+ - ComparisonOperator
+ - Threshold
+ - Period
+ - Unit
+ - EvaluationPeriods
+ - AlarmDescription
+ - TreatMissingData
+
+ - name: update alarm
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
+ state: present
+ name: "{{ alarm_full_name }}"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ statistic: Average
+ comparison: "<="
+ threshold: 5.0
+ period: 60
+ evaluation_periods: 3
+ unit: "Percent"
+ description: "This will alarm when an instance's cpu usage average is lower than 5% for 3 minutes "
+ register: ec2_instance_metric_alarm_update
+
+ - name: "verify that alarm registers as updated"
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm_update.changed'
+
+ - name: "verify that properties were changed"
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm_update.changed'
+ - 'ec2_instance_metric_alarm_update.period == 60' # period should now be 60, not the old value of 300
+ - 'ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn'
+ - 'ec2_instance_metric_alarm_update.statistic == alarm_info["MetricAlarms"][0].Statistic'
+ - 'ec2_instance_metric_alarm_update.name == alarm_info["MetricAlarms"][0].AlarmName'
+ - 'ec2_instance_metric_alarm_update.metric== alarm_info["MetricAlarms"][0].MetricName'
+ - 'ec2_instance_metric_alarm_update.namespace == alarm_info["MetricAlarms"][0].Namespace'
+ - 'ec2_instance_metric_alarm_update.comparison == alarm_info["MetricAlarms"][0].ComparisonOperator'
+ - 'ec2_instance_metric_alarm_update.threshold == alarm_info["MetricAlarms"][0].Threshold'
+ - 'ec2_instance_metric_alarm_update.unit == alarm_info["MetricAlarms"][0].Unit'
+ - 'ec2_instance_metric_alarm_update.evaluation_periods == alarm_info["MetricAlarms"][0].EvaluationPeriods'
+ - 'ec2_instance_metric_alarm_update.treat_missing_data == alarm_info["MetricAlarms"][0].TreatMissingData'
+
+ - name: try to remove the alarm
+ ec2_metric_alarm:
+ state: absent
+ name: "{{ alarm_full_name }}"
+ register: ec2_instance_metric_alarm_deletion
+
+ - name: Verify that the alarm reports deleted/changed
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm_deletion.changed'
+
+ - name: get info on alarms
+ command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: alarm_info_query
+
+ - name: convert it to an object
+ set_fact:
+ alarm_info: "{{ alarm_info_query.stdout |from_json }}"
+
+ - name: Verify that the alarm was deleted using cli
+ assert:
+ that:
+ - 'alarm_info["MetricAlarms"]|length == 0'
+ always:
+ - name: try to stop the ec2 instance
+ ec2_instance:
+ instance_ids: "{{ ec2_instance_results.instances[0].instance_id }}"
+ state: terminated
+ ignore_errors: yes
+
+ - include_tasks: env_cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/vars/main.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_metric_alarm/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/aliases
new file mode 100644
index 00000000..157ce0c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group3
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/defaults/main.yml
new file mode 100644
index 00000000..9a895bdf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/defaults/main.yml
@@ -0,0 +1,3 @@
+scaling_policy_lc_name: "{{ resource_prefix }}_lc"
+scaling_policy_asg_name: "{{ resource_prefix }}_asg"
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/tasks/main.yml
new file mode 100644
index 00000000..ba6c2163
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_scaling_policy/tasks/main.yml
@@ -0,0 +1,215 @@
+---
+# __Test Outline__
+#
+# __ec2_scaling_policy__
+# create simplescaling scaling policy
+# update simplescaling scaling policy
+# remove simplescaling scaling policy
+# create stepscaling scaling policy
+# update stepscaling scaling policy
+# remove stepscaling scaling policy
+
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+ block:
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ scaling_policy_image_id: '{{ latest_ami.image_id }}'
+
+ - name: create trivial launch_configuration
+ ec2_lc:
+ name: "{{ scaling_policy_lc_name }}"
+ state: present
+ instance_type: t3.nano
+ image_id: "{{ scaling_policy_image_id }}"
+
+ - name: create trivial ASG
+ ec2_asg:
+ name: "{{ scaling_policy_asg_name }}"
+ state: present
+ launch_config_name: "{{ scaling_policy_lc_name }}"
+ min_size: 0
+ max_size: 1
+ desired_capacity: 0
+
+ - name: Create Simple Scaling policy using implicit defaults
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_simplescaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ adjustment_type: ChangeInCapacity
+ scaling_adjustment: 1
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.changed
+
+ - name: Update Simple Scaling policy using explicit defaults
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_simplescaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ adjustment_type: ChangeInCapacity
+ scaling_adjustment: 1
+ policy_type: SimpleScaling
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - not result.changed
+
+ - name: min_adjustment_step is ignored with ChangeInCapacity
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_simplescaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ adjustment_type: ChangeInCapacity
+ scaling_adjustment: 1
+ min_adjustment_step: 1
+ policy_type: SimpleScaling
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - not result.changed
+ - result.adjustment_type == "ChangeInCapacity"
+
+ - name: Change Simple Scaling policy adjustment_type to PercentChangeInCapacity
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_simplescaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ adjustment_type: PercentChangeInCapacity
+ scaling_adjustment: 1
+ min_adjustment_step: 1
+ policy_type: SimpleScaling
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.changed
+ - result.adjustment_type == "PercentChangeInCapacity"
+
+ - name: Remove Simple Scaling policy
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_simplescaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Create Step Scaling policy
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_stepscaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ policy_type: StepScaling
+ metric_aggregation: Maximum
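+ # step bounds are offsets relative to the alarm threshold; the second
+ # step here is open-ended above its lower_bound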
+ step_adjustments:
+ - upper_bound: 20
+ scaling_adjustment: 50
+ - lower_bound: 20
+ scaling_adjustment: 100
+ adjustment_type: "PercentChangeInCapacity"
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_stepscaling_policy"
+ - result.changed
+
+ - name: Add another step
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_stepscaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: present
+ policy_type: StepScaling
+ metric_aggregation: Maximum
+ step_adjustments:
+ - upper_bound: 20
+ scaling_adjustment: 50
+ - lower_bound: 20
+ upper_bound: 40
+ scaling_adjustment: 75
+ - lower_bound: 40
+ scaling_adjustment: 100
+ adjustment_type: "PercentChangeInCapacity"
+ register: result
+
+ - assert:
+ that:
+ - result.policy_name == "{{ resource_prefix }}_stepscaling_policy"
+ - result.changed
+ - result.adjustment_type == "PercentChangeInCapacity"
+
+ - name: Remove Step Scaling policy
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_stepscaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Remove Step Scaling policy (idempotency)
+ ec2_scaling_policy:
+ name: "{{ resource_prefix }}_stepscaling_policy"
+ asg_name: "{{ scaling_policy_asg_name }}"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result is successful
+
+ always:
+
+ # ============================================================
+ - name: Remove the scaling policies
+ ec2_scaling_policy:
+ name: "{{ item }}"
+ state: absent
+ register: result
+ with_items:
+ - "{{ resource_prefix }}_simplescaling_policy"
+ - "{{ resource_prefix }}_stepscaling_policy"
+ ignore_errors: yes
+
+ - name: remove the ASG
+ ec2_asg:
+ name: "{{ scaling_policy_asg_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: remove the Launch Configuration
+ ec2_lc:
+ name: "{{ scaling_policy_lc_name }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/aliases
new file mode 100644
index 00000000..ce6c0771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+ec2_transit_gateway_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml
new file mode 100644
index 00000000..b70db393
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_transit_gateway/tasks/main.yml
@@ -0,0 +1,175 @@
+---
+# tasks file for test_ec2_transit_gateway
+
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
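+# The &aws_connection_info anchor is spliced into each task below via
+# '<<: *aws_connection_info', so every module call shares the same credentials.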
+
+- name: generate unique value for testing
+ set_fact:
+ tgw_description: "{{ resource_prefix }}-tgw"
+
+- block:
+ - name: test create transit gateway without permissions
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ region: "{{ aws_region }}"
+ register: result
+ ignore_errors: yes
+
+ - name: assert nice message returned
+ assert:
+ that:
+ - result is failed
+ - "result.msg != 'MODULE FAILURE'"
+
+ - name: test create transit gateway without region
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ register: result
+ ignore_errors: yes
+
+ - name: assert failure when called with minimal parameters but no region
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("The ec2_transit_gateway module requires a region")'
+
+ - name: test create transit gateway without tags
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ <<: *aws_connection_info
+ register: create_result
+ - name: assert changed is True
+ assert:
+ that:
+ - create_result.changed == True
+
+ - name: test update transit gateway with tags by description
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ tags:
+ Name: Ansible Test TGW
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True
+ assert:
+ that:
+ - result.changed == True
+ - result.transit_gateway.tags | length == 1
+ - "'Name' in result.transit_gateway.tags"
+
+ - name: test update transit gateway with new tag and purge_tags false
+ ec2_transit_gateway:
+ transit_gateway_id: '{{ create_result.transit_gateway.transit_gateway_id }}'
+ purge_tags: False
+ tags:
+ status: ok to delete
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True and have 2 tags
+ assert:
+ that:
+ - result.changed == True
+ - result.transit_gateway.tags | length == 2
+ - "'Name' in result.transit_gateway.tags"
+
+ - name: test update transit gateway with purge_tags true
+ ec2_transit_gateway:
+ transit_gateway_id: '{{ create_result.transit_gateway.transit_gateway_id }}'
+ purge_tags: True
+ tags:
+ status: ok to delete
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is True and TGW tag is absent
+ assert:
+ that:
+ - result.changed == True
+ - result.transit_gateway.tags | length == 1
+ - "'Name' not in result.transit_gateway.tags"
+
+ - name: test idempotence
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ purge_tags: True
+ tags:
+ status: ok to delete
+ <<: *aws_connection_info
+ register: result
+ - name: assert changed is False
+ assert:
+ that:
+ - result.changed == False
+
+ # ==== Combine ec2_transit_gateway_info ======================
+ - name: test success with no parameters
+ ec2_transit_gateway_info:
+ <<: *aws_connection_info
+ register: result
+ - name: assert success with no parameters
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.transit_gateways != []'
+
+ - name: test success with single filter
+ ec2_transit_gateway_info:
+ filters:
+ transit-gateway-id: "{{ create_result.transit_gateway.transit_gateway_id }}"
+ <<: *aws_connection_info
+ register: result
+ - name: assert success with transit_gateway_id filter
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.transit_gateways != []'
+
+ - name: test empty result set for non-existent tgw id via filter
+ ec2_transit_gateway_info:
+ filters:
+ transit-gateway-id: tgw-00000011111111122
+ <<: *aws_connection_info
+ register: result
+ - name: assert empty result set for non-existent tgw id
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.transit_gateways == []'
+
+ - name: test NotFound exception caught and returned empty result set
+ ec2_transit_gateway_info:
+ transit_gateway_id: tgw-00000011111111122
+ <<: *aws_connection_info
+ register: result
+ - name: assert NotFound is caught and an empty result set returned
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.transit_gateways == []'
+
+ - name: test success with multiple filters
+ ec2_transit_gateway_info:
+ filters:
+ options.dns-support: enable
+ options.vpn-ecmp-support: enable
+ <<: *aws_connection_info
+ register: result
+ - name: assert success with multiple filters
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.transit_gateways != []'
+ always:
+ ###### TEARDOWN STARTS HERE ######
+ - name: delete transit gateway
+ ec2_transit_gateway:
+ description: "{{ tgw_description }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
new file mode 100644
index 00000000..b9ce40cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
@@ -0,0 +1,116 @@
+---
+- name: ec2_vpc_egress_igw tests
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+ - name: test failure with no parameters
+ ec2_vpc_egress_igw:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: vpc_id"'
+
+ # ============================================================
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ # ============================================================
+ - name: test failure with non-existent VPC ID
+ ec2_vpc_egress_igw:
+ state: present
+ vpc_id: vpc-012345678
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with non-existent VPC ID
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.error.code == "InvalidVpcID.NotFound"'
+ - '"invalid vpc ID" in result.msg'
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "10.232.232.128/26"
+ <<: *aws_connection_info
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ # ============================================================
+ - name: create egress-only internet gateway (expected changed=true)
+ ec2_vpc_egress_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ <<: *aws_connection_info
+ register: vpc_eigw_create
+
+ - name: assert creation happened (expected changed=true)
+ assert:
+ that:
+ - 'vpc_eigw_create.changed'
+ - 'vpc_eigw_create.gateway_id.startswith("eigw-")'
+ - 'vpc_eigw_create.vpc_id == vpc_result.vpc.id'
+
+ # ============================================================
+ - name: attempt to recreate egress-only internet gateway on VPC (expected changed=false)
+ ec2_vpc_egress_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ <<: *aws_connection_info
+ register: vpc_eigw_recreate
+
+ - name: assert recreation did nothing (expected changed=false)
+ assert:
+ that:
+ - 'vpc_eigw_recreate.changed == False'
+ - 'vpc_eigw_recreate.gateway_id == vpc_eigw_create.gateway_id'
+ - 'vpc_eigw_recreate.vpc_id == vpc_eigw_create.vpc_id'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true)
+ ec2_vpc_egress_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ <<: *aws_connection_info
+ register: vpc_eigw_delete
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'vpc_eigw_delete.changed'
+
+ always:
+ # ============================================================
+ - name: tidy up EIGW
+ ec2_vpc_egress_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ <<: *aws_connection_info
+ ignore_errors: true
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "10.232.232.128/26"
+ <<: *aws_connection_info
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/aliases
new file mode 100644
index 00000000..6b8a2ae5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+ec2_vpc_igw_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml
new file mode 100644
index 00000000..eeda091c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
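+# random() with a fixed seed is deterministic, so a given resource_prefix
+# always maps to the same second octet (stable, mostly collision-free CIDRs)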
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml
new file mode 100644
index 00000000..634438c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml
@@ -0,0 +1,429 @@
+---
+- name: ec2_vpc_igw tests
+ collections:
+ - amazon.aws
+
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ # ============================================================
+ - name: Fetch IGWs in check_mode
+ ec2_vpc_igw_info:
+ register: igw_info
+ check_mode: True
+
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - name: Assert success
+ assert:
+ that:
+ - vpc_result is successful
+
+ # ============================================================
+ - name: Search for internet gateway by VPC - no matches
+ ec2_vpc_igw_info:
+ filters:
+ attachment.vpc-id: '{{ vpc_result.vpc.id }}'
+ register: igw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+
+ # ============================================================
+ - name: create internet gateway (expected changed=true) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_create
+ check_mode: yes
+
+ - name: assert creation would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_create is changed
+
+ - name: create internet gateway (expected changed=true)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_create
+
+ - name: assert creation happened (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_create is changed
+ - 'vpc_igw_create.gateway_id.startswith("igw-")'
+ - 'vpc_igw_create.vpc_id == vpc_result.vpc.id'
+ - '"tags" in vpc_igw_create'
+ - vpc_igw_create.tags | length == 2
+ - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"gateway_id" in vpc_igw_create'
+
+ # ============================================================
+ - name: Save IDs for later
+ set_fact:
+ igw_id: '{{ vpc_igw_create.gateway_id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+
+ # ============================================================
+ - name: Search for internet gateway by VPC
+ ec2_vpc_igw_info:
+ filters:
+ attachment.vpc-id: '{{ vpc_id }}'
+ register: igw_info
+
+ - name: 'Check standard IGW details'
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ - current_igw.tags | length == 2
+ - '"key" in current_igw.tags[0]'
+ - '"value" in current_igw.tags[0]'
+ - '"key" in current_igw.tags[1]'
+ - '"value" in current_igw.tags[1]'
+ # Order isn't guaranteed in boto3 style, so just check the keys and
+ # values we expect are in there.
+ - current_igw.tags[0].key in ["tag_one", "Tag Two"]
+ - current_igw.tags[1].key in ["tag_one", "Tag Two"]
+ - current_igw.tags[0].value in [resource_prefix + " One", "two " + resource_prefix]
+ - current_igw.tags[1].value in [resource_prefix + " One", "two " + resource_prefix]
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
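+ # For reference (shapes only, values depend on resource_prefix): without
+ # convert_tags the boto3-style result is a list of maps,
+ #   tags: [{"key": "tag_one", "value": "..."}, {"key": "Tag Two", "value": "..."}]
+ # whereas convert_tags collapses it into a plain dict,
+ #   tags: {"tag_one": "...", "Tag Two": "..."}
+ # which is what the lookup in the next task relies on.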
+
+ # ============================================================
+ - name: Fetch IGW by ID
+ ec2_vpc_igw_info:
+ internet_gateway_ids: '{{ igw_id }}'
+ convert_tags: yes
+ register: igw_info
+
+ - name: 'Check standard IGW details'
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ - current_igw.tags | length == 2
+ - '"tag_one" in current_igw.tags'
+ - '"Tag Two" in current_igw.tags'
+ - current_igw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - current_igw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
+
+ # ============================================================
+ - name: Fetch IGW by ID (list)
+ ec2_vpc_igw_info:
+ internet_gateway_ids:
+ - '{{ igw_id }}'
+ register: igw_info
+
+ - name: 'Check standard IGW details'
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
+
+ # ============================================================
+ - name: attempt to recreate internet gateway on VPC (expected changed=false) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_recreate
+ check_mode: yes
+
+ - name: assert recreation would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+
+ - name: attempt to recreate internet gateway on VPC (expected changed=false)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_recreate
+
+ - name: assert recreation did nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_create'
+ - vpc_igw_create.tags | length == 2
+ - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Update the tags (no change) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_recreate
+ check_mode: yes
+
+ - name: assert tag update would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+
+ - name: Update the tags (no change)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_recreate
+
+ - name: assert tag update did nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_recreate'
+ - vpc_igw_recreate.tags | length == 2
+ - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Update the tags - remove and add - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: assert tag update would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+
+ - name: Update the tags - remove and add
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: vpc_igw_update
+
+ - name: assert tags are updated (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 2
+ - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Update the tags add without purge - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: assert tags would be added - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+
+ - name: Update the tags add without purge
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: vpc_igw_update
+
+ - name: assert tags added
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 3
+ - vpc_igw_update.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Remove all tags - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags: {}
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: assert tags would be removed - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+
+ - name: Remove all tags
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags: {}
+ register: vpc_igw_update
+
+ - name: assert tags removed
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 0
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) - CHECK_MODE
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_delete
+ check_mode: yes
+
+ - name: assert state=absent (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_delete is changed
+
+ - name: test state=absent (expected changed=true)
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_delete
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_delete is changed
+
+ # ============================================================
+ - name: Fetch IGW by ID (list)
+ ec2_vpc_igw_info:
+ internet_gateway_ids:
+ - '{{ igw_id }}'
+ register: igw_info
+ ignore_errors: True
+
+ - name: 'Check IGW does not exist'
+ assert:
+ that:
+ # Deliberate choice not to change behaviour when searching by ID
+ - igw_info is failed
+
+ # ============================================================
+ - name: test state=absent when already deleted (expected changed=false) - CHECK_MODE
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_delete
+ check_mode: yes
+
+ - name: assert state=absent (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_delete is not changed
+
+ - name: test state=absent when already deleted (expected changed=false)
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_igw_delete
+
+ - name: assert state=absent (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_delete is not changed
+
+ always:
+ # ============================================================
+ - name: tidy up IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ignore_errors: true
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/aliases
new file mode 100644
index 00000000..d82d1f9e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/aliases
@@ -0,0 +1,5 @@
+ec2_vpc_nacl_info
+cloud/aws
+shippable/aws/group2
+# https://github.com/ansible-collections/community.aws/issues/153
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml
new file mode 100644
index 00000000..4eb60791
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml
@@ -0,0 +1,162 @@
+# ============================================================
+
+- name: create ingress and egress rules using subnet IDs
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
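+# (entry format: [rule_no, protocol, action, cidr, icmp_type, icmp_code,
+# port_from, port_to] - the icmp rule fills positions 5-6, tcp rules 7-8)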
+
+- name: assert the network acl was created
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].ingress | length == 3
+ - nacl_facts.nacls[0].egress | length == 1
+
+# ============================================================
+
+- name: remove an ingress rule
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].ingress | length == 2
+ - nacl_facts.nacls[0].egress | length == 1
+
+# ============================================================
+
+- name: remove the egress rule
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ egress: []
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].ingress | length == 2
+ - nacl_facts.nacls[0].egress | length == 0
+
+# ============================================================
+
+- name: add egress rules
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ egress:
+ - [100, 'tcp', 'allow', '10.0.0.0/24', null, null, 22, 22]
+ - [200, 'udp', 'allow', '10.0.0.0/24', null, null, 22, 22]
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].ingress | length == 2
+ - nacl_facts.nacls[0].egress | length == 2
+
+# ============================================================
+
+- name: remove the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
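+# (the retry loop presumably rides out AWS eventual consistency between
+# dissociating the subnets and deleting the ACL)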
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml
new file mode 100644
index 00000000..16b3a5aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml
@@ -0,0 +1,178 @@
+- block:
+ - name: create a VPC
+ ec2_vpc_net:
+ cidr_block: 10.230.231.0/24
+ name: "{{ resource_prefix }}-ipv6"
+ state: present
+ ipv6_cidr: yes
+ register: vpc_result
+
+ - set_fact:
+ vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}"
+
+ # ============================================================
+ - name: create subnet with IPv6 (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: 10.230.231.0/26
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
+ state: present
+ tags:
+ Name: "{{ resource_prefix }}-ipv6-subnet-1"
+ register: vpc_subnet_ipv6
+
+ - name: assert creation with IPv6 happened (expected changed=true)
+ assert:
+ that:
+ - "vpc_subnet_ipv6.subnet.ipv6_cidr_block == '{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}'"
+
+ # ============================================================
+
+ - name: create ingress and egress rules using subnet names
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ resource_prefix }}-ipv6-subnet-1"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+ - assert:
+ that:
+ - nacl.nacl_id
+
+ - set_fact:
+ nacl_id: "{{ nacl.nacl_id }}"
+
+ - name: add ipv6 entries
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ resource_prefix }}-ipv6-subnet-1"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [205, 'ipv6-tcp', 'allow', '::/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ - [105, 'all', 'allow', '::/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+ # FIXME: Currently IPv6 rules are not supported - uncomment assertion when
+ # fixed (and add some nacl_info tests)
+ ignore_errors: yes
+ - name: get network ACL facts (test that it works with ipv6 entries)
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl_id }}"
+ register: nacl_facts
+
+
+ #- assert:
+ # that:
+ # - nacl.changed
+ # - nacl.nacl_id == nacl_id
+
+ - name: purge ingress entries
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ resource_prefix }}-ipv6-subnet-1"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress: []
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ - [105, 'all', 'allow', '::/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+ # FIXME: Currently IPv6 rules are not supported - uncomment assertion when
+ # fixed (and add some nacl_info tests)
+ ignore_errors: yes
+
+ #- assert:
+ # that:
+ # - nacl.changed
+ # - nacl.nacl_id == nacl_id
+
+ - name: purge egress entries
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ resource_prefix }}-ipv6-subnet-1"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress: []
+ egress: []
+ state: 'present'
+ register: nacl
+
+ - assert:
+ that:
+ - nacl.changed
+
+ # ============================================================
+ - name: remove subnet ipv6 cidr (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: 10.230.231.0/26
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: vpc_remove_ipv6_cidr
+
+ - name: assert subnet ipv6 cidr removed (expected changed=true)
+ assert:
+ that:
+ - 'vpc_remove_ipv6_cidr.changed'
+
+ always:
+
+ ################################################
+ # TEARDOWN STARTS HERE
+ ################################################
+
+ - name: remove network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: removed_acl
+ until: removed_acl is success
+ retries: 5
+ delay: 5
+ ignore_errors: yes
+
+ - name: tidy up subnet
+ ec2_vpc_subnet:
+ cidr: 10.230.231.0/26
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: removed_subnet
+ until: removed_subnet is success
+ retries: 5
+ delay: 5
+ ignore_errors: yes
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-ipv6"
+ state: absent
+ cidr_block: 10.230.231.0/24
+ register: removed_vpc
+ until: removed_vpc is success
+ retries: 5
+ delay: 5
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml
new file mode 100644
index 00000000..7be79895
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/main.yml
@@ -0,0 +1,172 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+
+ - name: test without any parameters
+ ec2_vpc_nacl:
+ register: result
+ ignore_errors: yes
+
+ - name: assert required parameters
+ assert:
+ that:
+ - result.failed
+ - "result.msg == 'one of the following is required: name, nacl_id'"
+
+ - name: get network ACL info without any parameters
+ ec2_vpc_nacl_info:
+ register: nacl_facts
+
+ - name: assert we don't error
+ assert:
+ that:
+ - nacl_facts is succeeded
+
+ - name: get network ACL info with invalid ID
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - 'acl-000000000000'
+ register: nacl_facts
+ ignore_errors: yes
+
+ - name: assert message mentions missing ACLs
+ assert:
+ that:
+ - nacl_facts is failed
+ - '"does not exist" in nacl_facts.msg'
+
+ # ============================================================
+
+ - name: fetch the available AZs
+ aws_az_info:
+ register: az_info
+
+ - name: Assert that we have multiple AZs available to us
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: pick AZs
+ set_fact:
+ az_one: '{{ az_info.availability_zones[0].zone_name }}'
+ az_two: '{{ az_info.availability_zones[1].zone_name }}'
+
+ # ============================================================
+
+ - name: create a VPC
+ ec2_vpc_net:
+ cidr_block: 10.230.230.0/24
+ name: "{{ resource_prefix }}"
+ state: present
+ register: vpc
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ tags:
+ Name: "{{ item.name }}"
+ with_items:
+ - cidr: 10.230.230.0/26
+ az: "{{ az_one }}"
+ name: "{{ resource_prefix }}-subnet-1"
+ - cidr: 10.230.230.64/26
+ az: "{{ az_two }}"
+ name: "{{ resource_prefix }}-subnet-2"
+ - cidr: 10.230.230.128/26
+ az: "{{ az_one }}"
+ name: "{{ resource_prefix }}-subnet-3"
+ - cidr: 10.230.230.192/26
+ az: "{{ az_two }}"
+ name: "{{ resource_prefix }}-subnet-4"
+ register: subnets
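+ # subnets.results carries one entry per with_items item; the JMESPath
+ # queries below pull the subnet ids and Name tags back out of that list
+ # for the included task files.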
+
+ # ============================================================
+
+ - include_tasks: tasks/subnet_ids.yml
+ vars:
+ vpc_id: "{{ vpc.vpc.id }}"
+ subnet_ids: "{{ subnets | community.general.json_query('results[*].subnet.id') }}"
+
+ - include_tasks: tasks/subnet_names.yml
+ vars:
+ vpc_id: "{{ vpc.vpc.id }}"
+ subnet_names: "{{ subnets | community.general.json_query('results[*].subnet.tags.Name') }}"
+
+ - include_tasks: tasks/tags.yml
+ vars:
+ vpc_id: "{{ vpc.vpc.id }}"
+ subnet_ids: "{{ subnets | community.general.json_query('results[*].subnet.id') }}"
+
+ - include_tasks: tasks/ingress_and_egress.yml
+ vars:
+ vpc_id: "{{ vpc.vpc.id }}"
+ subnet_ids: "{{ subnets | community.general.json_query('results[*].subnet.id') }}"
+
+ - include_tasks: tasks/ipv6.yml
+
+ # ============================================================
+
+ always:
+
+ - name: remove network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc.vpc.id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: removed_acl
+ until: removed_acl is success
+ retries: 5
+ delay: 5
+ ignore_errors: yes
+
+ - name: remove subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ aws_region}}{{ item.az }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ tags:
+ Public: "{{ item.public | string }}"
+ Name: "{{ item.public | ternary('public', 'private') }}-{{ item.az }}"
+ with_items:
+ - cidr: 10.230.230.0/26
+ az: "a"
+ public: "True"
+ - cidr: 10.230.230.64/26
+ az: "b"
+ public: "True"
+ - cidr: 10.230.230.128/26
+ az: "a"
+ public: "False"
+ - cidr: 10.230.230.192/26
+ az: "b"
+ public: "False"
+ ignore_errors: yes
+ register: removed_subnets
+ until: removed_subnets is success
+ retries: 5
+ delay: 5
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ cidr_block: 10.230.230.0/24
+ name: "{{ resource_prefix }}"
+ state: absent
+ ignore_errors: yes
+ register: removed_vpc
+ until: removed_vpc is success
+ retries: 5
+ delay: 5
+
+ # ============================================================
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml
new file mode 100644
index 00000000..de371d62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml
@@ -0,0 +1,174 @@
+# ============================================================
+
+- name: create ingress and egress rules using subnet IDs
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- set_fact:
+ nacl_id: "{{ nacl.nacl_id }}"
+
+- name: assert the network acl was created
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].nacl_id == nacl_id
+ - nacl_facts.nacls[0].subnets | length == 4
+ - nacl_facts.nacls[0].subnets | sort == subnet_ids | sort
+ - nacl_facts.nacls[0].ingress | length == 3
+ - nacl_facts.nacls[0].egress | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: test idempotence
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl already existed
+ assert:
+ that:
+ - not nacl.changed
+ - nacl.nacl_id == nacl_id
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts_idem
+
+- name: assert the facts are the same as before
+ assert:
+ that:
+ - nacl_facts_idem == nacl_facts
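+# (comparing the two registered results wholesale is a cheap idempotence
+# check: the second run must return identical facts)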
+
+# ============================================================
+
+- name: remove a subnet from the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ subnet_ids[0] }}"
+ - "{{ subnet_ids[1] }}"
+ - "{{ subnet_ids[2] }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- name: assert the network ACL changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+ - nacl.nacl_id == nacl_id
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].nacl_id == nacl_id
+ - nacl_facts.nacls[0].subnets | length == 3
+ - subnet_ids[3] not in nacl_facts.nacls[0].subnets
+ - nacl_facts.nacls[0].ingress | length == 3
+ - nacl_facts.nacls[0].egress | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: remove the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl.changed
+
+- name: re-remove the network ACL by name (test idempotency)
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl is not changed
+
+- name: re-remove the network ACL by id (test idempotency)
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ nacl_id: "{{ nacl_id }}"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl is not changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml
new file mode 100644
index 00000000..5a4db04d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml
@@ -0,0 +1,140 @@
+# ============================================================
+
+- name: create ingress and egress rules using subnet names
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_names }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- set_fact:
+ nacl_id: "{{ nacl.nacl_id }}"
+
+- name: assert the network acl was created
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].nacl_id == nacl_id
+ - nacl_facts.nacls[0].subnets | length == 4
+ - nacl_facts.nacls[0].ingress | length == 3
+ - nacl_facts.nacls[0].egress | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: test idempotence
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_names }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl already existed
+ assert:
+ that:
+ - not nacl.changed
+ - nacl.nacl_id == nacl_id
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts_idem
+
+- name: assert the facts are the same as before
+ assert:
+ that:
+ - nacl_facts_idem == nacl_facts
+
+# ============================================================
+
+- name: remove a subnet from the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets:
+ - "{{ subnet_names[0] }}"
+ - "{{ subnet_names[1] }}"
+ - "{{ subnet_names[2] }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ ingress:
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+ register: nacl
+
+- name: assert the network ACL changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id == nacl_id
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].nacl_id == nacl_id
+ - nacl_facts.nacls[0].subnets | length == 3
+ - nacl_facts.nacls[0].ingress | length == 3
+ - nacl_facts.nacls[0].egress | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: remove the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/tags.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/tags.yml
new file mode 100644
index 00000000..f7847850
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nacl/tasks/tags.yml
@@ -0,0 +1,117 @@
+# ============================================================
+
+- name: create a network ACL using subnet IDs
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl was created
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls[0].tags | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: add a tag
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ tags:
+ Created_by: "Ansible test {{ resource_prefix }}"
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl changed
+ assert:
+ that:
+ - nacl.changed
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the new tag appears in the facts
+ assert:
+ that:
+ - nacl_facts.nacls[0].tags | length == 2
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+ - "'{{ nacl_facts.nacls[0].tags.Created_by }}' == 'Ansible test {{ resource_prefix }}'"
+
+- name: get network ACL facts by filter
+ ec2_vpc_nacl_info:
+ filters:
+ "tag:Created_by": "Ansible test {{ resource_prefix }}"
+ register: nacl_facts
+
+- name: assert the facts are the same as before
+ assert:
+ that:
+ - nacl_facts.nacls | length == 1
+ - nacl_facts.nacls[0].tags | length == 2
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+ - "'{{ nacl_facts.nacls[0].tags.Created_by }}' == 'Ansible test {{ resource_prefix }}'"
+
+# ============================================================
+
+- name: remove a tag
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ subnets: "{{ subnet_ids }}"
+ state: 'present'
+ register: nacl
+
+- name: assert the network acl changed
+ assert:
+ that:
+ - nacl.changed
+ - nacl.nacl_id.startswith('acl-')
+
+- name: get network ACL facts
+ ec2_vpc_nacl_info:
+ nacl_ids:
+ - "{{ nacl.nacl_id }}"
+ register: nacl_facts
+
+- name: assert the nacl has the correct attributes
+ assert:
+ that:
+ - nacl_facts.nacls[0].tags | length == 1
+ - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
+
+# ============================================================
+
+- name: remove the network ACL
+ ec2_vpc_nacl:
+ vpc_id: "{{ vpc_id }}"
+ name: "{{ resource_prefix }}-acl"
+ state: absent
+ register: nacl
+ until: nacl is success
+ ignore_errors: yes
+ retries: 5
+ delay: 5
+
+- name: assert nacl was removed
+ assert:
+ that:
+ - nacl.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
new file mode 100644
index 00000000..7cb7e986
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
@@ -0,0 +1,82 @@
+# The tests for this module are incomplete.
+# The tests below were migrated from unit tests.
+# They take advantage of hard-coded results within the module to trigger both changed and unchanged responses.
+# They were migrated to maintain test coverage while removing unit tests that depended on use of TaskQueueManager.
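+# As the assertions below expect, the module's hard-coded check_mode results
+# treat subnet-12345678/eipalloc-12345678 as a gateway that does not exist
+# yet (changed) and the subnet-123456789 variants as one that already does
+# (unchanged).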
+
+- name: Create new nat gateway with eip allocation-id
+ ec2_vpc_nat_gateway:
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - nat_gateway.changed
+
+- name: Create new nat gateway with eip allocation-id (gateway already exists, expected changed=false)
+ ec2_vpc_nat_gateway:
+ subnet_id: subnet-123456789
+ allocation_id: eipalloc-1234567
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - not nat_gateway.changed
+
+- name: Create new nat gateway with eip address
+ ec2_vpc_nat_gateway:
+ subnet_id: subnet-12345678
+ eip_address: 55.55.55.55
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - nat_gateway.changed
+
+- name: Create new nat gateway with eip address (gateway already exists, expected changed=false)
+ ec2_vpc_nat_gateway:
+ subnet_id: subnet-123456789
+ eip_address: 55.55.55.55
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - not nat_gateway.changed
+
+- name: Create new nat gateway only if one does not exist already
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: subnet-123456789
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - not nat_gateway.changed
+
+- name: Delete Nat Gateway
+ ec2_vpc_nat_gateway:
+ nat_gateway_id: nat-123456789
+ state: absent
+ wait: yes
+ region: us-west-2
+ register: nat_gateway
+ check_mode: yes
+
+- assert:
+ that:
+ - nat_gateway.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/aliases
new file mode 100644
index 00000000..e4da78b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group2
+unstable
+ec2_vpc_route_table_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml
new file mode 100644
index 00000000..c3e4daf0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml
@@ -0,0 +1,761 @@
+- name: ec2_vpc_route_table tests
+ collections:
+ - amazon.aws
+
+ block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
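+ # the &aws_connection_info anchor lets every task below merge these
+ # credentials in via '<<: *aws_connection_info' instead of repeating them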
+
+ - name: create VPC
+ ec2_vpc_net:
+ cidr_block: 10.228.228.0/22
+ name: "{{ resource_prefix }}_vpc"
+ state: present
+ <<: *aws_connection_info
+ register: vpc
+
+ - name: create public subnet
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ aws_region}}{{ item.az }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ tags:
+ Public: "{{ item.public|string }}"
+ Name: "{{ (item.public|bool)|ternary('public', 'private') }}-{{ item.az }}"
+ <<: *aws_connection_info
+ with_items:
+ - cidr: 10.228.228.0/24
+ az: "a"
+ public: "True"
+ - cidr: 10.228.229.0/24
+ az: "b"
+ public: "True"
+ - cidr: 10.228.230.0/24
+ az: "a"
+ public: "False"
+ - cidr: 10.228.231.0/24
+ az: "b"
+ public: "False"
+ register: subnets
+
+ - ec2_vpc_subnet_info:
+ filters:
+ vpc-id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+ register: vpc_subnets
+
+ - name: create IGW
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: "{{ subnets.results[0].subnet.id }}"
+ <<: *aws_connection_info
+ register: nat_gateway
+
+ - name: CHECK MODE - route table should be created
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ check_mode: true
+ register: check_mode_results
+
+ - name: assert that the public route table would be created
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: create public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ register: create_public_table
+
+ - name: assert that public route table has an id
+ assert:
+ that:
+ # - create_public_table.changed
+ - "create_public_table.route_table.id.startswith('rtb-')"
+ - "'Public' in create_public_table.route_table.tags and create_public_table.route_table.tags['Public'] == 'true'"
+ - create_public_table.route_table.routes|length == 1
+ - create_public_table.route_table.associations|length == 0
+
+ - name: CHECK MODE - route table should already exist
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert the table already exists
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: recreate public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ register: recreate_public_route_table
+
+ - name: assert that public route table did not change
+ assert:
+ that:
+ - not recreate_public_route_table.changed
+
+ - name: CHECK MODE - add route to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert a route would be added
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add a route to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ <<: *aws_connection_info
+ register: add_routes
+
+ - name: assert route table contains new route
+ assert:
+ that:
+ - add_routes.changed
+ - add_routes.route_table.routes|length == 2
+
+ - name: CHECK MODE - add subnets to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert the subnets would be added to the route table
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add subnets to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ <<: *aws_connection_info
+ register: add_subnets
+
+ - name: assert route table contains subnets
+ assert:
+ that:
+ - add_subnets.changed
+ - add_subnets.route_table.associations|length == 2
+
+ - name: add a route to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ <<: *aws_connection_info
+ register: add_routes
+
+ - name: CHECK MODE - no routes but purge_routes set to false
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ purge_routes: no
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert no routes would be removed
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: rerun with purge_routes set to false
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ purge_routes: no
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ <<: *aws_connection_info
+ register: no_purge_routes
+
+ - name: assert route table still has routes
+ assert:
+ that:
+ - not no_purge_routes.changed
+ - no_purge_routes.route_table.routes|length == 2
+ - no_purge_routes.route_table.associations|length == 2
+
+ - name: rerun with purge_subnets set to false
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ purge_subnets: no
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ <<: *aws_connection_info
+ register: no_purge_subnets
+
+ - name: assert route table still has subnets
+ assert:
+ that:
+ - not no_purge_subnets.changed
+ - no_purge_subnets.route_table.routes|length == 2
+ - no_purge_subnets.route_table.associations|length == 2
+
+ - name: rerun with purge_tags not set (implicitly false)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ lookup: id
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ <<: *aws_connection_info
+ register: no_purge_tags
+
+ - name: assert route table still has tags
+ assert:
+ that:
+ - not no_purge_tags.changed
+ - "'Public' in no_purge_tags.route_table.tags and no_purge_tags.route_table.tags['Public'] == 'true'"
+
+ - name: CHECK MODE - purge subnets
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert subnets would be removed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: purge subnets
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ register: purge_subnets
+
+ - name: assert purge subnets worked
+ assert:
+ that:
+ - purge_subnets.changed
+ - purge_subnets.route_table.associations|length == 0
+ - purge_subnets.route_table.id == create_public_table.route_table.id
+
+ - name: CHECK MODE - purge routes
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ routes: []
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert routes would be removed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add subnets by cidr to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].cidr_block') }}"
+ lookup: id
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ <<: *aws_connection_info
+ register: add_subnets_cidr
+
+ - name: assert route table contains subnets added by cidr
+ assert:
+ that:
+ - add_subnets_cidr.changed
+ - add_subnets_cidr.route_table.associations|length == 2
+
+ - name: purge subnets added by cidr
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ lookup: id
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ <<: *aws_connection_info
+ register: purge_subnets_cidr
+
+ - name: assert purge subnets added by cidr worked
+ assert:
+ that:
+ - purge_subnets_cidr.changed
+ - purge_subnets_cidr.route_table.associations|length == 0
+
+ - name: add subnets by name to public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].tags.Name') }}"
+ lookup: id
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ <<: *aws_connection_info
+ register: add_subnets_name
+
+ - name: assert route table contains subnets added by name
+ assert:
+ that:
+ - add_subnets_name.changed
+ - add_subnets_name.route_table.associations|length == 2
+
+ - name: purge subnets added by name
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ lookup: id
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ <<: *aws_connection_info
+ register: purge_subnets_name
+
+ - name: assert purge subnets added by name worked
+ assert:
+ that:
+ - purge_subnets_name.changed
+ - purge_subnets_name.route_table.associations|length == 0
+
+ - name: purge routes
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "true"
+ Name: "Public route table"
+ <<: *aws_connection_info
+ routes: []
+ register: purge_routes
+
+ - name: assert purge routes worked
+ assert:
+ that:
+ - purge_routes.changed
+ - purge_routes.route_table.routes|length == 1
+ - purge_routes.route_table.id == create_public_table.route_table.id
+
+ - name: CHECK MODE - update tags
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ lookup: id
+ purge_tags: yes
+ tags:
+ Name: Public route table
+ Updated: new_tag
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert tags would be changed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: update tags
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ lookup: id
+ purge_tags: yes
+ tags:
+ Name: Public route table
+ Updated: new_tag
+ <<: *aws_connection_info
+ register: update_tags
+
+ - name: assert update tags worked
+ assert:
+ that:
+ - update_tags.changed
+ - "'Updated' in update_tags.route_table.tags and update_tags.route_table.tags['Updated'] == 'new_tag'"
+ - "'Public' not in update_tags.route_table.tags"
+
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: "{{ subnets.results[0].subnet.id }}"
+ <<: *aws_connection_info
+ register: nat_gateway
+
+ - name: CHECK MODE - create private route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "false"
+ Name: "Private route table"
+ routes:
+ - gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ dest: 0.0.0.0/0
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `False`].id') }}"
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert the route table would be created
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: create private route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "false"
+ Name: "Private route table"
+ routes:
+ - gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ dest: 0.0.0.0/0
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `False`].id') }}"
+ <<: *aws_connection_info
+ register: create_private_table
+
+ - name: assert creating private route table worked
+ assert:
+ that:
+ - create_private_table.changed
+ - create_private_table.route_table.id != create_public_table.route_table.id
+ - "'Public' in create_private_table.route_table.tags"
+
+ - name: CHECK MODE - destroy public route table by tags
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ tags:
+ Updated: new_tag
+ Name: Public route table
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert the route table would be deleted
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: destroy public route table by tags
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ tags:
+ Updated: new_tag
+ Name: Public route table
+ <<: *aws_connection_info
+ register: destroy_table
+
+ - name: assert destroy table worked
+ assert:
+ that:
+ - destroy_table.changed
+
+ - name: CHECK MODE - redestroy public route table
+ ec2_vpc_route_table:
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ lookup: id
+ state: absent
+ <<: *aws_connection_info
+ check_mode: True
+ register: check_mode_results
+
+ - name: assert the public route table does not exist
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: redestroy public route table
+ ec2_vpc_route_table:
+ route_table_id: "{{ create_public_table.route_table.id }}"
+ lookup: id
+ state: absent
+ <<: *aws_connection_info
+ register: redestroy_table
+
+ - name: assert redestroy table worked
+ assert:
+ that:
+ - not redestroy_table.changed
+
+ - name: destroy NAT GW
+ ec2_vpc_nat_gateway:
+ state: absent
+ wait: yes
+ release_eip: yes
+ subnet_id: "{{ subnets.results[0].subnet.id }}"
+ nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ <<: *aws_connection_info
+ register: nat_gateway
+
+ - name: show route table info, get table using route-table-id
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: "{{ create_private_table.route_table.id }}"
+ <<: *aws_connection_info
+ register: route_table_info
+
+ - name: assert route_table_info has correct attributes
+ assert:
+ that:
+ - '"route_tables" in route_table_info'
+ - 'route_table_info.route_tables | length == 1'
+ - '"id" in route_table_info.route_tables[0]'
+ - '"routes" in route_table_info.route_tables[0]'
+ - '"associations" in route_table_info.route_tables[0]'
+ - '"tags" in route_table_info.route_tables[0]'
+ - '"vpc_id" in route_table_info.route_tables[0]'
+ - 'route_table_info.route_tables[0].id == create_private_table.route_table.id'
+
+ - name: show route table info, get table using tags
+ ec2_vpc_route_table_info:
+ filters:
+ "tag:Public": "false"
+ "tag:Name": "Private route table"
+ vpc-id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+ register: route_table_info
+
+ - name: assert route_table_info has correct tags
+ assert:
+ that:
+ - 'route_table_info.route_tables | length == 1'
+ - '"tags" in route_table_info.route_tables[0]'
+ - '"Public" in route_table_info.route_tables[0].tags and route_table_info.route_tables[0].tags["Public"] == "false"'
+ - '"Name" in route_table_info.route_tables[0].tags and route_table_info.route_tables[0].tags["Name"] == "Private route table"'
+
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: "{{ subnets.results[0].subnet.id }}"
+ <<: *aws_connection_info
+ register: nat_gateway
+
+ - name: show route table info
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: "{{ create_private_table.route_table.id }}"
+ <<: *aws_connection_info
+
+ - name: recreate private route table with new NAT GW
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "false"
+ Name: "Private route table"
+ routes:
+ - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ dest: 0.0.0.0/0
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `False`].id') }}"
+ <<: *aws_connection_info
+ register: recreate_private_table
+
+ - name: assert creating private route table worked
+ assert:
+ that:
+ - recreate_private_table.changed
+ - recreate_private_table.route_table.id != create_public_table.route_table.id
+
+ - name: create a VPC endpoint to test ec2_vpc_route_table ignores it
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ service: "com.amazonaws.{{ aws_region }}.s3"
+ route_table_ids:
+ - "{{ recreate_private_table.route_table.route_table_id }}"
+ <<: *aws_connection_info
+ register: vpc_endpoint
+
+ - name: purge routes
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: "false"
+ Name: "Private route table"
+ routes:
+ - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ dest: 0.0.0.0/0
+ subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `False`].id') }}"
+ purge_routes: true
+ <<: *aws_connection_info
+ register: result
+
+ - name: Get endpoint infos to verify that it wasn't purged from the route table
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ vpc_endpoint_ids:
+ - "{{ vpc_endpoint.result.vpc_endpoint_id }}"
+ <<: *aws_connection_info
+ register: endpoint_details
+
+ - name: assert the route table is associated with the VPC endpoint
+ assert:
+ that:
+ - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id
+
+ always:
+ #############################################################################
+ # TEAR DOWN STARTS HERE
+ #############################################################################
+ - name: remove the VPC endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: "{{ vpc_endpoint.result.vpc_endpoint_id }}"
+ <<: *aws_connection_info
+ when: vpc_endpoint is defined
+ ignore_errors: yes
+
+ - name: destroy route tables
+ ec2_vpc_route_table:
+ route_table_id: "{{ item.route_table.id }}"
+ lookup: id
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - "{{ create_public_table|default() }}"
+ - "{{ create_private_table|default() }}"
+ when: item and not item.failed
+ ignore_errors: yes
+
+ - name: destroy NAT GW
+ ec2_vpc_nat_gateway:
+ state: absent
+ wait: yes
+ release_eip: yes
+ subnet_id: "{{ subnets.results[0].subnet.id }}"
+ nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: destroy IGW
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: destroy subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - cidr: 10.228.228.0/24
+ - cidr: 10.228.229.0/24
+ - cidr: 10.228.230.0/24
+ - cidr: 10.228.231.0/24
+ ignore_errors: yes
+
+ # FIXME: ec2_vpc_nat_gateway should take care of this, but clearly doesn't always
+ - name: ensure EIP is actually released
+ ec2_eip:
+ state: absent
+ device_id: "{{ item.network_interface_id }}"
+ in_vpc: yes
+ <<: *aws_connection_info
+ with_items: "{{ nat_gateway.nat_gateway_addresses }}"
+ ignore_errors: yes
+
+ - name: destroy VPC
+ ec2_vpc_net:
+ cidr_block: 10.228.228.0/22
+ name: "{{ resource_prefix }}_vpc"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/aliases
new file mode 100644
index 00000000..0b8a330a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group2
+# https://github.com/ansible-collections/community.aws/issues/154
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml
new file mode 100644
index 00000000..641f6563
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vgw/tasks/main.yml
@@ -0,0 +1,174 @@
+---
+- name: ec2_vpc_vgw tests
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ # ============================================================
+ - debug: msg="Setting up test dependencies"
+
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-{{ item }}"
+ state: present
+ cidr_block: "10.0.0.0/26"
+ <<: *aws_connection_info
+ tags:
+ Name: "{{ resource_prefix }}-vpc-{{ item }}"
+ Description: "Created by ansible-test"
+ register: vpc_result
+ loop: [1, 2]
+
+ - name: use set fact for vpc ids
+ set_fact:
+ vpc_id_1: '{{ vpc_result.results.0.vpc.id }}'
+ vpc_id_2: '{{ vpc_result.results.1.vpc.id }}'
+
+ # ============================================================
+ - debug: msg="Running tests"
+
+ - name: create vpn gateway and attach it to vpc
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_1 }}'
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - vgw.changed
+ - "{{ vgw.vgw.vpc_id == vpc_id_1 }}"
+ - '"{{ vgw.vgw.tags.Name }}" == "{{ resource_prefix }}-vgw"'
+
+ - name: test idempotence
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_1 }}'
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - not vgw.changed
+
+ # ============================================================
+ - name: attach vpn gateway to the other VPC
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_2 }}'
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - vgw.changed
+ - "{{ vgw.vgw.vpc_id == vpc_id_2 }}"
+
+ # ============================================================
+ - name: add tags to the VGW
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_2 }}'
+ name: "{{ resource_prefix }}-vgw"
+ tags:
+ created_by: ec2_vpc_vgw integration tests
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - vgw.changed
+ - vgw.vgw.tags | length == 2
+ - "'created_by' in vgw.vgw.tags"
+
+ - name: test idempotence
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_2 }}'
+ name: "{{ resource_prefix }}-vgw"
+ tags:
+ created_by: ec2_vpc_vgw integration tests
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - not vgw.changed
+
+ # ============================================================
+ - name: remove tags from the VGW
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_id_2 }}'
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - vgw.changed
+ - vgw.vgw.tags | length == 1
+ - '"{{ vgw.vgw.tags.Name }}" == "{{ resource_prefix }}-vgw"'
+
+ # ============================================================
+ - name: detach vpn gateway
+ ec2_vpc_vgw:
+ state: present
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - vgw.changed
+ - not vgw.vgw.vpc_id
+
+ - name: test idempotence
+ ec2_vpc_vgw:
+ state: present
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - assert:
+ that:
+ - not vgw.changed
+
+ # ============================================================
+
+ always:
+
+ - debug: msg="Removing test dependencies"
+
+ - name: delete vpn gateway
+ ec2_vpc_vgw:
+ state: absent
+ vpn_gateway_id: '{{ vgw.vgw.id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: delete vpc
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-{{ item }}"
+ state: absent
+ cidr_block: "10.0.0.0/26"
+ <<: *aws_connection_info
+ loop: [1, 2]
+ register: result
+ retries: 10
+ delay: 5
+ until: result is not failed
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/aliases
new file mode 100644
index 00000000..e915bed8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group3
+# https://github.com/ansible-collections/community.aws/issues/156
+unstable
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/tasks/main.yml
new file mode 100644
index 00000000..1d432a17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ec2_vpc_vpn_info/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+- name: ec2_vpc_vpn_info tests
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "10.0.0.0/26"
+ <<: *aws_connection_info
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - name: create vpn gateway and attach it to vpc
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ name: "{{ resource_prefix }}-vgw"
+ <<: *aws_connection_info
+ register: vgw
+
+ - name: create customer gateway
+ ec2_customer_gateway:
+ bgp_asn: 12345
+ ip_address: 1.2.3.4
+ name: testcgw
+ <<: *aws_connection_info
+ register: cgw
+
+ - name: create vpn connection, with customer gateway
+ ec2_vpc_vpn:
+ customer_gateway_id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
+ vpn_gateway_id: '{{ vgw.vgw.id }}'
+ state: present
+ <<: *aws_connection_info
+ register: vpn
+
+ # ============================================================
+ - name: test success with no parameters
+ ec2_vpc_vpn_info:
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert success with no parameters
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.vpn_connections != []'
+
+ - name: test success with customer gateway id as a filter
+ ec2_vpc_vpn_info:
+ filters:
+ customer-gateway-id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
+ vpn-connection-id: '{{ vpn.vpn_connection_id }}'
+ <<: *aws_connection_info
+ register: result
+
+ - name: assert success with customer gateway id as filter
+ assert:
+ that:
+ - 'result.changed == false'
+ - 'result.vpn_connections != []'
+
+ # ============================================================
+ always:
+
+ - name: delete vpn connection
+ ec2_vpc_vpn:
+ state: absent
+ vpn_connection_id: '{{ vpn.vpn_connection_id }}'
+ <<: *aws_connection_info
+ register: result
+ retries: 10
+ delay: 3
+ until: result is not failed
+ ignore_errors: true
+
+ - name: delete customer gateway
+ ec2_customer_gateway:
+ state: absent
+ ip_address: 1.2.3.4
+ name: testcgw
+ bgp_asn: 12345
+ <<: *aws_connection_info
+ register: result
+ retries: 10
+ delay: 3
+ until: result is not failed
+ ignore_errors: true
+
+ - name: delete vpn gateway
+ ec2_vpc_vgw:
+ state: absent
+ vpn_gateway_id: '{{ vgw.vgw.id }}'
+ <<: *aws_connection_info
+ register: result
+ retries: 10
+ delay: 3
+ until: result is not failed
+ ignore_errors: true
+
+ - name: delete vpc
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "10.0.0.0/26"
+ <<: *aws_connection_info
+ register: result
+ retries: 10
+ delay: 3
+ until: result is not failed
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/aliases
new file mode 100644
index 00000000..4b1bea7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/aliases
@@ -0,0 +1,6 @@
+cloud/aws
+ecs_service_info
+ecs_task
+ecs_taskdefinition
+ecs_taskdefinition_info
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/defaults/main.yml
new file mode 100644
index 00000000..20e010e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/defaults/main.yml
@@ -0,0 +1,38 @@
+ecs_cluster_name: "{{ resource_prefix }}"
+user_data: |
+ #!/bin/bash
+ echo ECS_CLUSTER={{ ecs_cluster_name }} >> /etc/ecs/ecs.config
+
+ecs_service_name: "{{ resource_prefix }}-service"
+ecs_task_image_path: nginx
+ecs_task_name: "{{ resource_prefix }}-task"
+ecs_task_memory: 128
+ecs_task_containers:
+- name: "{{ ecs_task_name }}"
+ image: "{{ ecs_task_image_path }}"
+ essential: true
+ memory: "{{ ecs_task_memory }}"
+ portMappings:
+ - containerPort: "{{ ecs_task_container_port }}"
+ hostPort: "{{ ecs_task_host_port|default(0) }}"
+ mountPoints: "{{ ecs_task_mount_points|default([]) }}"
+ecs_service_deployment_configuration:
+ minimum_healthy_percent: 0
+ maximum_percent: 100
+ecs_service_placement_strategy:
+ - type: binpack
+ field: memory
+ - type: spread
+ field: attribute:ecs.availability-zone
+ecs_task_container_port: 8080
+ecs_target_group_name: "{{ resource_prefix[:28] }}-tg"
+ecs_load_balancer_name: "{{ resource_prefix[:29] }}-lb"
+ecs_service_health_check_grace_period: 60
+ecs_fargate_task_containers:
+- name: "{{ ecs_task_name }}"
+ image: "{{ ecs_task_image_path }}"
+ essential: true
+ portMappings:
+ - containerPort: "{{ ecs_task_container_port }}"
+ hostPort: "{{ ecs_task_host_port|default(0) }}"
+ #mountPoints: "{{ ecs_task_mount_points|default([]) }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ec2-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ec2-trust-policy.json
new file mode 100644
index 00000000..72413abd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ec2-trust-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ecs-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ecs-trust-policy.json
new file mode 100644
index 00000000..f871b34d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/files/ecs-trust-policy.json
@@ -0,0 +1,16 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": [
+ "ecs.amazonaws.com",
+ "ecs-tasks.amazonaws.com"
+ ]
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/full_test.yml
new file mode 100644
index 00000000..19db74d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/full_test.yml
@@ -0,0 +1,1172 @@
+---
+# tasks file for ecs_cluster
+- name: ecs_cluster tests
+ collections:
+ - amazon.aws
+
+ block:
+ # ============================================================
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: ensure IAM instance role exists
+ iam_role:
+ name: ecsInstanceRole
+ assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}"
+ state: present
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceforEC2Role
+ <<: *aws_connection_info
+
+ - name: ensure IAM service role exists
+ iam_role:
+ name: ecsServiceRole
+ assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
+ state: present
+ create_instance_profile: no
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ <<: *aws_connection_info
+
+ - name: ensure AWSServiceRoleForECS role exists
+ iam_role_info:
+ name: AWSServiceRoleForECS
+ <<: *aws_connection_info
+ register: iam_role_result
+
+ # FIXME: come up with a way to automate this
+ - name: fail if AWSServiceRoleForECS role does not exist
+ fail:
+ msg: >
+ Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com` to create
+ a linked role for AWS VPC load balancer management
+ when: not iam_role_result.iam_roles
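+
+ # A possible automation of the manual step above (a sketch, not part of the
+ # original tests; it assumes the test credentials are allowed to call
+ # iam:CreateServiceLinkedRole):
+ # - name: create ECS service-linked role
+ # command: aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com
+ # failed_when: false # the API rejects the call if the role already exists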
+
+ - name: create an ECS cluster
+ ecs_cluster:
+ name: "{{ ecs_cluster_name }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_cluster
+
+ - name: check that ecs_cluster changed
+ assert:
+ that:
+ - ecs_cluster.changed
+
+ - name: create same ECS cluster (should do nothing)
+ ecs_cluster:
+ name: "{{ ecs_cluster_name }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_cluster_again
+
+ - name: check that ecs_cluster did not change
+ assert:
+ that:
+ - not ecs_cluster_again.changed
+
+ - name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: 10.0.0.0/16
+ state: present
+ name: '{{ resource_prefix }}_ecs_cluster'
+ resource_tags:
+ Name: '{{ resource_prefix }}_ecs_cluster'
+ <<: *aws_connection_info
+ register: setup_vpc
+
+ - name: create a key pair to use for creating an ec2 instance
+ ec2_key:
+ name: '{{ resource_prefix }}_ecs_cluster'
+ state: present
+ <<: *aws_connection_info
+ when: ec2_keypair is not defined # allow override in cloud-config-aws.ini
+ register: setup_key
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}{{ item.zone }}'
+ tags:
+ Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: "{{ item.cidr }}"
+ state: present
+ <<: *aws_connection_info
+ register: setup_subnet
+ with_items:
+ - zone: a
+ cidr: 10.0.1.0/24
+ - zone: b
+ cidr: 10.0.2.0/24
+
+ - name: create an internet gateway so that ECS agents can talk to ECS
+ ec2_vpc_igw:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ state: present
+ <<: *aws_connection_info
+ register: igw
+
+ - name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_ecs_cluster-sg'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ rules: # allow all ssh traffic but nothing else
+ - ports: 22
+ cidr: 0.0.0.0/0
+ <<: *aws_connection_info
+ register: setup_sg
+
+ - name: find a suitable AMI
+ ec2_ami_info:
+ owner: amazon
+ filters:
+ description: "Amazon Linux AMI* ECS *"
+ <<: *aws_connection_info
+ register: ec2_ami_info
+
+ - name: set image id fact
+ set_fact:
+ ecs_image_id: "{{ (ec2_ami_info.images|first).image_id }}"
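+ # Note: images|first depends on the API's return order. A more deterministic
+ # pick (a sketch, assuming every returned image carries a creation_date) would be:
+ # ecs_image_id: "{{ (ec2_ami_info.images | sort(attribute='creation_date') | last).image_id }}"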
+
+ - name: provision ec2 instance to create an image
+ ec2_instance:
+ key_name: '{{ ec2_keypair|default(setup_key.key.name) }}'
+ instance_type: t2.micro
+ state: present
+ image_id: '{{ ecs_image_id }}'
+ wait: yes
+ user_data: "{{ user_data }}"
+ instance_role: ecsInstanceRole
+ tags:
+ Name: '{{ resource_prefix }}_ecs_agent'
+ security_group: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - name: create target group
+ elb_target_group:
+ name: "{{ ecs_target_group_name }}1"
+ state: present
+ protocol: HTTP
+ port: 8080
+ modify_targets: no
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ target_type: instance
+ <<: *aws_connection_info
+ register: elb_target_group_instance
+
+ - name: create second target group to use ip target_type
+ elb_target_group:
+ name: "{{ ecs_target_group_name }}2"
+ state: present
+ protocol: HTTP
+ port: 8080
+ modify_targets: no
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ target_type: ip
+ <<: *aws_connection_info
+ register: elb_target_group_ip
+
+ - name: create load balancer
+ elb_application_lb:
+ name: "{{ ecs_load_balancer_name }}"
+ state: present
+ scheme: internal
+ security_groups: '{{ setup_sg.group_id }}'
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ ecs_target_group_name }}1"
+ - Protocol: HTTP
+ Port: 81
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ ecs_target_group_name }}2"
+ <<: *aws_connection_info
+
+ - name: create task definition
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_task_definition
+
+ - name: recreate task definition
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_task_definition_again
+
+ - name: check that task definition does not change
+ assert:
+ that:
+ - not ecs_task_definition_again.changed
+ # FIXME: task definition should not change, will need #26752 or equivalent
+ ignore_errors: yes
+
+ - name: obtain ECS task definition facts
+ ecs_taskdefinition_info:
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ <<: *aws_connection_info
+
+ - name: create ECS service definition
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ role: "ecsServiceRole"
+ <<: *aws_connection_info
+ register: ecs_service
+
+ - name: check that ECS service creation changed
+ assert:
+ that:
+ - ecs_service.changed
+
+ - name: create same ECS service definition (should not change)
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ role: "ecsServiceRole"
+ <<: *aws_connection_info
+ register: ecs_service_again
+
+ - name: check that ECS service recreation changed nothing
+ assert:
+ that:
+ - not ecs_service_again.changed
+ # FIXME: service should not change, needs fixing
+ ignore_errors: yes
+
+ # FIXME: attempt to update service load balancer
+ - name: update ECS service definition (expected to fail)
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port|int + 1 }}"
+ role: "ecsServiceRole"
+ <<: *aws_connection_info
+ register: update_ecs_service
+ ignore_errors: yes
+
+ - name: assert that updating ECS load balancer failed with helpful message
+ assert:
+ that:
+ - update_ecs_service is failed
+ - "'error' not in update_ecs_service"
+ - "'msg' in update_ecs_service"
+
+
+ - name: attempt to use ECS network configuration on task definition without awsvpc network_mode
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}3"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ <<: *aws_connection_info
+ register: ecs_service_network_without_awsvpc_task
+ ignore_errors: yes
+
+ - name: assert that using ECS network configuration with non AWSVPC task definition fails
+ assert:
+ that:
+ - ecs_service_network_without_awsvpc_task is failed
+
+ - name: scale down ECS service
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 0
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ role: "ecsServiceRole"
+ <<: *aws_connection_info
+ register: ecs_service_scale_down
+
+ - name: pause to allow service to scale down
+ pause:
+ seconds: 60
+
+ - name: delete ECS service definition
+ ecs_service:
+ state: absent
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ <<: *aws_connection_info
+ register: delete_ecs_service
+
+ - name: assert that deleting ECS service worked
+ assert:
+ that:
+ - delete_ecs_service.changed
+
+ - name: create VPC-networked task definition with host port set to 0 (expected to fail)
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ state: present
+ network_mode: awsvpc
+ <<: *aws_connection_info
+ register: ecs_task_definition_vpc_no_host_port
+ ignore_errors: yes
+
+ - name: check that awsvpc task definition with host port 0 fails gracefully
+ assert:
+ that:
+ - ecs_task_definition_vpc_no_host_port is failed
+ - "'error' not in ecs_task_definition_vpc_no_host_port"
+
+ - name: create VPC-networked task definition with host port set to 8080
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ network_mode: awsvpc
+ state: present
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ register: ecs_task_definition_vpc_with_host_port
+
+ - name: obtain ECS task definition facts
+ ecs_taskdefinition_info:
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_info
+
+ - name: assert that network mode is awsvpc
+ assert:
+ that:
+ - "ecs_taskdefinition_info.network_mode == 'awsvpc'"
+
+ - name: pause to allow service to scale down
+ pause:
+ seconds: 60
+
+ - name: delete ECS service definition
+ ecs_service:
+ state: absent
+ name: "{{ ecs_service_name }}4"
+ cluster: "{{ ecs_cluster_name }}"
+ <<: *aws_connection_info
+ register: delete_ecs_service
+
+ - name: create ECS service definition with network configuration
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}2"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ <<: *aws_connection_info
+ register: create_ecs_service_with_vpc
+
+ - name: assert that network configuration is correct
+ assert:
+ that:
+ - "'networkConfiguration' in create_ecs_service_with_vpc.service"
+ - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration"
+ - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
+ - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"
+
+ - name: create dummy group to update ECS service with
+ ec2_group:
+ name: "{{ resource_prefix }}-ecs-vpc-test-sg"
+ description: "Test security group for ECS with VPC"
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ state: present
+ <<: *aws_connection_info
+
+ - name: update ECS service definition with new network configuration
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}2"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - "{{ resource_prefix }}-ecs-vpc-test-sg"
+ <<: *aws_connection_info
+ register: update_ecs_service_with_vpc
+
+ - name: check that ECS service changed
+ assert:
+ that:
+ - update_ecs_service_with_vpc.changed
+ - "'networkConfiguration' in update_ecs_service_with_vpc.service"
+ - "'awsvpcConfiguration' in update_ecs_service_with_vpc.service.networkConfiguration"
+ - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
+ - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"
+
+ - name: create ecs_service using health_check_grace_period_seconds
+ ecs_service:
+ name: "{{ ecs_service_name }}-mft"
+ cluster: "{{ ecs_cluster_name }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ scheduling_strategy: "REPLICA"
+ health_check_grace_period_seconds: 10
+ desired_count: 1
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_hcgp
+
+
+ - name: health_check_grace_period_seconds sets healthCheckGracePeriodSeconds
+ assert:
+ that:
+ - ecs_service_creation_hcgp.changed
+ - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 10"
+
+ - name: update ecs_service using health_check_grace_period_seconds
+ ecs_service:
+ name: "{{ ecs_service_name }}-mft"
+ cluster: "{{ ecs_cluster_name }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ health_check_grace_period_seconds: 30
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_hcgp2
+ ignore_errors: no
+
+ - name: check that module returns success
+ assert:
+ that:
+ - ecs_service_creation_hcgp2.changed
+ - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 30"
+
+# until ansible supports service registries, this test can't run.
+# - name: update ecs_service using service_registries
+# ecs_service:
+# name: "{{ ecs_service_name }}-service-registries"
+# cluster: "{{ ecs_cluster_name }}"
+# load_balancers:
+# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+# containerName: "{{ ecs_task_name }}"
+# containerPort: "{{ ecs_task_container_port }}"
+# service_registries:
+# - containerName: "{{ ecs_task_name }}"
+# containerPort: "{{ ecs_task_container_port }}"
+# ### TODO: Figure out how to get a service registry ARN without a service registry module.
+# registryArn: "{{ ecs_task_service_registry_arn }}"
+# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+# desired_count: 1
+# state: present
+# <<: *aws_connection_info
+# register: ecs_service_creation_sr
+# ignore_errors: yes
+
+# - name: dump sr output
+# debug: var=ecs_service_creation_sr
+
+# - name: check that module returns success
+# assert:
+# that:
+# - ecs_service_creation_sr.changed
+
+ - name: update ecs_service using REPLICA scheduling_strategy
+ ecs_service:
+ name: "{{ ecs_service_name }}-replica"
+ cluster: "{{ ecs_cluster_name }}"
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ scheduling_strategy: "REPLICA"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_replica
+
+ - name: obtain facts for all ECS services in the cluster
+ ecs_service_info:
+ cluster: "{{ ecs_cluster_name }}"
+ details: yes
+ events: no
+ <<: *aws_connection_info
+ register: ecs_service_info
+
+ - name: assert that facts are useful
+ assert:
+ that:
+ - "'services' in ecs_service_info"
+ - ecs_service_info.services | length > 0
+ - "'events' not in ecs_service_info.services[0]"
+
+ - name: obtain facts for existing service in the cluster
+ ecs_service_info:
+ cluster: "{{ ecs_cluster_name }}"
+ service: "{{ ecs_service_name }}"
+ details: yes
+ events: no
+ <<: *aws_connection_info
+ register: ecs_service_info
+
+ - name: assert that existing service is available and running
+ assert:
+ that:
+ - "ecs_service_info.services|length == 1"
+ - "ecs_service_info.services_not_running|length == 0"
+
+ - name: obtain facts for non-existent service in the cluster
+ ecs_service_info:
+ cluster: "{{ ecs_cluster_name }}"
+ service: madeup
+ details: yes
+ events: no
+ <<: *aws_connection_info
+ register: ecs_service_info
+
+ - name: assert that non-existent service is missing
+ assert:
+ that:
+ - "ecs_service_info.services_not_running[0].reason == 'MISSING'"
+
+ - name: obtain specific ECS service facts
+ ecs_service_info:
+ service: "{{ ecs_service_name }}2"
+ cluster: "{{ ecs_cluster_name }}"
+ details: yes
+ <<: *aws_connection_info
+ register: ecs_service_info
+
+ - name: check that facts contain network configuration
+ assert:
+ that:
+ - "'networkConfiguration' in ecs_service_info.services[0]"
+
+ - name: attempt to get facts from missing task definition
+ ecs_taskdefinition_info:
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1 }}"
+ <<: *aws_connection_info
+
+ # ============================================================
+ # Begin tests for Fargate
+
+ - name: ensure AmazonECSTaskExecutionRolePolicy exists
+ iam_role:
+ name: ecsTaskExecutionRole
+ assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
+ description: "Allows ECS containers to make calls to ECR"
+ state: present
+ create_instance_profile: no
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ <<: *aws_connection_info
+ register: iam_execution_role
+
+ - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail)
+ ecs_taskdefinition:
+ containers: "{{ ecs_fargate_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ network_mode: bridge
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1024
+ state: present
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ ignore_errors: yes
+ register: ecs_fargate_task_definition_bridged_with_host_port
+
+ - name: check that fargate task definition with bridged networking fails gracefully
+ assert:
+ that:
+ - ecs_fargate_task_definition_bridged_with_host_port is failed
+ - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"'
+
+ - name: create Fargate VPC-networked task definition without CPU or Memory (expected to Fail)
+ ecs_taskdefinition:
+ containers: "{{ ecs_fargate_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ network_mode: awsvpc
+ launch_type: FARGATE
+ state: present
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_fargate_task_definition_vpc_no_mem
+
+ - name: check that fargate task definition without memory or cpu fails gracefully
+ assert:
+ that:
+ - ecs_fargate_task_definition_vpc_no_mem is failed
+ - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"'
+
+ - name: create Fargate VPC-networked task definition with CPU or Memory and execution role
+ ecs_taskdefinition:
+ containers: "{{ ecs_fargate_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ network_mode: awsvpc
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1024
+ execution_role_arn: "{{ iam_execution_role.arn }}"
+ state: present
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ register: ecs_fargate_task_definition
+
+ - name: obtain ECS task definition facts
+ ecs_taskdefinition_info:
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+ <<: *aws_connection_info
+
+ - name: create fargate ECS service without network config (expected to fail)
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}4"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ launch_type: FARGATE
+ <<: *aws_connection_info
+ register: ecs_fargate_service_network_without_awsvpc
+ ignore_errors: yes
+
+ - name: assert that using Fargate ECS service fails
+ assert:
+ that:
+ - ecs_fargate_service_network_without_awsvpc is failed
+
+ - name: create fargate ECS service with network config
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}4"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+ desired_count: 1
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ launch_type: FARGATE
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ assign_public_ip: true
+ <<: *aws_connection_info
+ register: ecs_fargate_service_network_with_awsvpc
+
+ - name: assert that public IP assignment is enabled
+ assert:
+ that:
+ - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"'
+
+ - name: create fargate ECS task with run task
+ ecs_task:
+ operation: run
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc"
+ launch_type: FARGATE
+ count: 1
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ assign_public_ip: true
+ started_by: ansible_user
+ <<: *aws_connection_info
+ register: fargate_run_task_output
+
+ # aws cli not installed in docker container; make sure it's installed.
+ - name: install awscli
+ pip:
+ state: present
+ name: awscli
+
+ - name: disable taskLongArnFormat
+ command: aws ecs put-account-setting --name taskLongArnFormat --value disabled
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+
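+ # ECS allows tags only on resources that use the long ARN format, so with
+ # taskLongArnFormat disabled the tagged run_task below is expected to be rejected.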
+ - name: create fargate ECS task with run task and tags (LF disabled) (should fail)
+ ecs_task:
+ operation: run
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc"
+ launch_type: FARGATE
+ count: 1
+ tags:
+ tag_key: tag_value
+ tag_key2: tag_value2
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ assign_public_ip: true
+ started_by: ansible_user
+ <<: *aws_connection_info
+ register: fargate_run_task_output_with_tags_fail
+ ignore_errors: yes
+
+ - name: enable taskLongArnFormat
+ command: aws ecs put-account-setting --name taskLongArnFormat --value enabled
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+
+ - name: create fargate ECS task with run task and tags
+ ecs_task:
+ operation: run
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc"
+ launch_type: FARGATE
+ count: 1
+ tags:
+ tag_key: tag_value
+ tag_key2: tag_value2
+ network_configuration:
+ subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}"
+ security_groups:
+ - '{{ setup_sg.group_id }}'
+ assign_public_ip: true
+ started_by: ansible_user
+ <<: *aws_connection_info
+ register: fargate_run_task_output_with_tags
+
+
+ # ============================================================
+ # End tests for Fargate
+
+ - name: create task definition for absent with arn regression test
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}-absent"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_task_definition_absent_with_arn
+
+ - name: absent task definition by arn
+ ecs_taskdefinition:
+ arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}"
+ state: absent
+ <<: *aws_connection_info
+
+ always:
+ # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
+ - name: Announce teardown start
+ debug:
+ msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
+
+ - name: obtain ECS service facts
+ ecs_service_info:
+ service: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ details: yes
+ <<: *aws_connection_info
+ register: ecs_service_info
+
+ - name: scale down ECS service
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_service_info.services[0].taskDefinition }}"
+ desired_count: 0
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_scale_down
+
+ - name: obtain second ECS service facts
+ ecs_service_info:
+ service: "{{ ecs_service_name }}2"
+ cluster: "{{ ecs_cluster_name }}"
+ details: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_info
+
+ - name: scale down second ECS service
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}2"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_service_info.services[0].taskDefinition }}"
+ desired_count: 0
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ placement_strategy: "{{ ecs_service_placement_strategy }}"
+ load_balancers:
+ - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_scale_down
+
+ - name: scale down multifunction-test service
+ ecs_service:
+ name: "{{ ecs_service_name }}-mft"
+ cluster: "{{ ecs_cluster_name }}"
+ state: present
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 0
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_scale_down
+
+
+
+ - name: scale down scheduling_strategy service
+ ecs_service:
+ name: "{{ ecs_service_name }}-replica"
+ cluster: "{{ ecs_cluster_name }}"
+ state: present
+ load_balancers:
+ - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+ containerName: "{{ ecs_task_name }}"
+ containerPort: "{{ ecs_task_container_port }}"
+ task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+ desired_count: 0
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_scale_down
+
+
+# until ansible supports service registries, the test for it can't run and this
+# scale down is not needed
+# - name: scale down service_registries service
+# ecs_service:
+# name: "{{ ecs_service_name }}-service-registries"
+# cluster: "{{ ecs_cluster_name }}"
+# state: present
+# load_balancers:
+# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+# containerName: "{{ ecs_task_name }}"
+# containerPort: "{{ ecs_task_container_port }}"
+# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+# desired_count: 0
+# <<: *aws_connection_info
+# ignore_errors: yes
+# register: ecs_service_scale_down
+
+ - name: scale down Fargate ECS service
+ ecs_service:
+ state: present
+ name: "{{ ecs_service_name }}4"
+ cluster: "{{ ecs_cluster_name }}"
+ task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+ desired_count: 0
+ deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_service_scale_down
+
+ - name: stop Fargate ECS task
+ ecs_task:
+ task: "{{ fargate_run_task_output.task[0].taskArn }}"
+ task_definition: "{{ ecs_task_name }}-vpc"
+ operation: stop
+ cluster: "{{ ecs_cluster_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: stop Fargate ECS task
+ ecs_task:
+ task: "{{ fargate_run_task_output_with_tags.task[0].taskArn }}"
+ task_definition: "{{ ecs_task_name }}-vpc"
+ operation: stop
+ cluster: "{{ ecs_cluster_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: pause to allow services to scale down
+ pause:
+ seconds: 60
+ when: ecs_service_scale_down is not failed
+
+ - name: remove ecs service
+ ecs_service:
+ state: absent
+ cluster: "{{ ecs_cluster_name }}"
+ name: "{{ ecs_service_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove second ecs service
+ ecs_service:
+ state: absent
+ cluster: "{{ ecs_cluster_name }}"
+ name: "{{ ecs_service_name }}2"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove mft ecs service
+ ecs_service:
+ state: absent
+ cluster: "{{ ecs_cluster_name }}"
+ name: "{{ ecs_service_name }}-mft"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove scheduling_strategy ecs service
+ ecs_service:
+ state: absent
+ cluster: "{{ ecs_cluster_name }}"
+ name: "{{ ecs_service_name }}-replica"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+# until ansible supports service registries, the test for it can't run and this
+# removal is not needed
+# - name: remove service_registries ecs service
+# ecs_service:
+# state: absent
+# cluster: "{{ ecs_cluster_name }}"
+# name: "{{ ecs_service_name }}-service-registries"
+# <<: *aws_connection_info
+# ignore_errors: yes
+
+ - name: remove fargate ECS service
+ ecs_service:
+ state: absent
+ name: "{{ ecs_service_name }}4"
+ cluster: "{{ ecs_cluster_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_fargate_service_network_with_awsvpc
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}"
+ revision: "{{ ecs_task_definition.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ ignore_errors: yes
+
+ - name: remove ecs task definition again
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}"
+ revision: "{{ ecs_task_definition_again.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ ignore_errors: yes
+
+ - name: remove second ecs task definition
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ vars:
+ ecs_task_host_port: 8080
+ ignore_errors: yes
+
+ - name: remove fargate ecs task definition
+ ecs_taskdefinition:
+ containers: "{{ ecs_fargate_task_containers }}"
+ family: "{{ ecs_task_name }}-vpc"
+ revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition for absent with arn
+ ecs_taskdefinition:
+ containers: "{{ ecs_task_containers }}"
+ family: "{{ ecs_task_name }}-absent"
+ revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove load balancer
+ elb_application_lb:
+ name: "{{ ecs_load_balancer_name }}"
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: elb_application_lb_remove
+
+ - name: pause to allow target group to be disassociated
+ pause:
+ seconds: 30
+ when: not elb_application_lb_remove is failed
+
+ - name: remove target groups
+ elb_target_group:
+ name: "{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - "{{ ecs_target_group_name }}1"
+ - "{{ ecs_target_group_name }}2"
+ ignore_errors: yes
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove setup keypair
+ ec2_key:
+ name: '{{ resource_prefix }}_ecs_cluster'
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ with_items:
+ - "{{ resource_prefix }}-ecs-vpc-test-sg"
+ - '{{ resource_prefix }}_ecs_cluster-sg'
+ ignore_errors: yes
+
+ - name: remove IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}{{ item.zone }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: "{{ item.cidr}}"
+ state: absent
+ <<: *aws_connection_info
+ with_items:
+ - zone: a
+ cidr: 10.0.1.0/24
+ - zone: b
+ cidr: 10.0.2.0/24
+ ignore_errors: yes
+
+ - name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: 10.0.0.0/16
+ state: absent
+ name: '{{ resource_prefix }}_ecs_cluster'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ECS cluster
+ ecs_cluster:
+ name: "{{ ecs_cluster_name }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml
new file mode 100644
index 00000000..ccbd00a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/main.yml
@@ -0,0 +1,53 @@
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: virtualenv
+
+- pip:
+ name:
+ - 'botocore<1.8.4'
+ - boto3
+ - coverage<5
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: network_assign_public_ip_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- include_tasks: network_force_new_deployment_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+
+# The graceful-failure tests above ran against an old botocore; install a newer
+# botocore for the remaining success-path tests (force_new_deployment and the full suite).
+
+- pip:
+ name:
+ - 'botocore>=1.12.60'
+ - boto3
+ - coverage<5
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: network_force_new_deployment.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- include_tasks: full_test.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml
new file mode 100644
index 00000000..b4b7e531
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml
@@ -0,0 +1,123 @@
+- block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: True
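+ # the &aws_connection_info anchor lets the tasks below merge these
+ # credentials in with "<<: *aws_connection_info"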
+
+ - name: create ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: create ecs_taskdefinition with bridged network
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ state: present
+ network_mode: bridge
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation
+
+ - name: create ecs_taskdefinition with awsvpc network
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}-vpc"
+ state: present
+ network_mode: awsvpc
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation_vpc
+
+ - name: ecs_taskdefinition works fine even when older botocore is used
+ assert:
+ that:
+ - ecs_taskdefinition_creation_vpc.changed
+
+ - name: create ecs_service using awsvpc network_configuration
+ ecs_service:
+ name: "{{ resource_prefix }}-vpc"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}-vpc"
+ desired_count: 1
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-abcd1234
+ assign_public_ip: true
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_vpc
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_service
+ assert:
+ that:
+ - ecs_service_creation_vpc.failed
+ - 'ecs_service_creation_vpc.msg == "botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration"'
+
+ always:
+ - name: scale down ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 0
+ state: present
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: pause to wait for scale down
+ pause:
+ seconds: 30
+
+ - name: remove ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition vpc
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}-vpc"
+ revision: "{{ ecs_taskdefinition_creation_vpc.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_fail.yml
new file mode 100644
index 00000000..4c050837
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_fail.yml
@@ -0,0 +1,216 @@
+- block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: True
+
+ - name: create ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: create ecs_taskdefinition with bridged network
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ state: present
+ network_mode: bridge
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation
+
+ - name: create ecs_taskdefinition with awsvpc network
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}-vpc"
+ state: present
+ network_mode: awsvpc
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation_vpc
+
+ - name: create ecs_taskdefinition and execution_role_arn (expected to fail)
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}-vpc"
+ execution_role_arn: not_a_real_arn
+ state: present
+ network_mode: awsvpc
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: ecs_taskdefinition_arn
+
+ - name: check that graceful failure message is returned from ecs_taskdefinition_arn
+ assert:
+ that:
+ - ecs_taskdefinition_arn.failed
+ - 'ecs_taskdefinition_arn.msg == "botocore needs to be version 1.10.44 or higher to use execution_role_arn"'
+
+ - name: ecs_taskdefinition works fine even when older botocore is used
+ assert:
+ that:
+ - ecs_taskdefinition_creation_vpc.changed
+
+ - name: create ecs_service using bridged network
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation
+
+ - name: create ecs_service using awsvpc network_configuration
+ ecs_service:
+ name: "{{ resource_prefix }}-vpc"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}-vpc"
+ desired_count: 1
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-abcd1234
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_vpc
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_service
+ assert:
+ that:
+ - ecs_service_creation_vpc.failed
+ - 'ecs_service_creation_vpc.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
+
+ - name: create ecs_service using awsvpc network_configuration and launch_type
+ ecs_service:
+ name: "{{ resource_prefix }}-vpc"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}-vpc"
+ desired_count: 1
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-abcd1234
+ launch_type: FARGATE
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_vpc_launchtype
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_service
+ assert:
+ that:
+ - ecs_service_creation_vpc_launchtype.failed
+ - 'ecs_service_creation_vpc_launchtype.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
+
+ - name: create ecs_service with launchtype and missing network_configuration
+ ecs_service:
+ name: "{{ resource_prefix }}-vpc"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}-vpc"
+ desired_count: 1
+ launch_type: FARGATE
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_vpc_launchtype_nonet
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_service
+ assert:
+ that:
+ - ecs_service_creation_vpc_launchtype_nonet.failed
+ - 'ecs_service_creation_vpc_launchtype_nonet.msg == "launch_type is FARGATE but all of the following are missing: network_configuration"'
+
+ - name: create ecs_task using awsvpc network_configuration
+ ecs_task:
+ cluster: "{{ resource_prefix }}-vpc"
+ task_definition: "{{ resource_prefix }}"
+ operation: run
+ count: 1
+ started_by: me
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-abcd1234
+ <<: *aws_connection_info
+ register: ecs_task_creation_vpc
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_task
+ assert:
+ that:
+ - ecs_task_creation_vpc.failed
+ - 'ecs_task_creation_vpc.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
+
+
+ always:
+ - name: scale down ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 0
+ state: present
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: pause to wait for scale down
+ pause:
+ seconds: 30
+
+ - name: remove ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition vpc
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}-vpc"
+ revision: "{{ ecs_taskdefinition_creation_vpc.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml
new file mode 100644
index 00000000..c86e7222
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml
@@ -0,0 +1,124 @@
+- block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: True
+
+ - name: create ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: create ecs_taskdefinition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation
+
+ # Even after deleting the cluster and recreating it with a different
+ # name, a previous service stuck in a draining state can prevent the
+ # new service from starting. Check the service info and sleep if the
+ # service does not report as inactive.
+
+ - name: check if service is still running from a previous task
+ ecs_service_info:
+ service: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ details: yes
+ <<: *aws_connection_info
+ register: ecs_service_info_results
+
+ - name: show service status from the previous task
+ debug: var=ecs_service_info_results
+
+ - name: delay if the service was not inactive
+ pause:
+ seconds: 30
+ when:
+ - ecs_service_info_results.services | length > 0
+ - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
+
+ - name: create ecs_service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation
+
+ - name: ecs_service works fine even when older botocore is used
+ assert:
+ that:
+ - ecs_service_creation.changed
+
+ - name: create ecs_service using force_new_deployment
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ force_new_deployment: true
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_force_new_deploy
+ ignore_errors: yes
+
+ - name: check that module returns success
+ assert:
+ that:
+ - ecs_service_creation_force_new_deploy.changed
+
+ always:
+ - name: scale down ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 0
+ state: present
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: pause to wait for scale down
+ pause:
+ seconds: 30
+
+ - name: remove ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml
new file mode 100644
index 00000000..95e8c576
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml
@@ -0,0 +1,125 @@
+- block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: True
+
+ - name: create ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+
+ - name: create ecs_taskdefinition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ state: present
+ <<: *aws_connection_info
+ register: ecs_taskdefinition_creation
+
+ # Even after deleting the cluster and recreating it with a different
+ # name, a previous service stuck in a draining state can prevent the
+ # new service from starting. Check the service info and sleep if the
+ # service does not report as inactive.
+
+ - name: check if service is still running from a previous task
+ ecs_service_info:
+ service: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ details: yes
+ <<: *aws_connection_info
+ register: ecs_service_info_results
+
+ - name: show service status from the previous task
+ debug: var=ecs_service_info_results
+
+ - name: delay if the service was not inactive
+ pause:
+ seconds: 30
+ when:
+ - ecs_service_info_results.services | length > 0
+ - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
+
+ - name: create ecs_service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation
+
+ - name: ecs_service works fine even when older botocore is used
+ assert:
+ that:
+ - ecs_service_creation.changed
+
+ - name: create ecs_service using force_new_deployment
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ force_new_deployment: true
+ state: present
+ <<: *aws_connection_info
+ register: ecs_service_creation_force_new_deploy
+ ignore_errors: yes
+
+ - name: check that graceful failure message is returned from ecs_service
+ assert:
+ that:
+ - ecs_service_creation_force_new_deploy.failed
+ - 'ecs_service_creation_force_new_deploy.msg == "botocore needs to be version 1.8.4 or higher to use force_new_deployment"'
+
+ always:
+ - name: scale down ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 0
+ state: present
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: pause to wait for scale down
+ pause:
+ seconds: 30
+
+ - name: remove ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: remove ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/defaults/main.yml
new file mode 100644
index 00000000..4a912794
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/defaults/main.yml
@@ -0,0 +1,22 @@
+policy:
+ Version: '2008-10-17'
+ Statement:
+ - Sid: new statement
+ Effect: Allow
+ Principal: "*"
+ Action:
+ - ecr:GetDownloadUrlForLayer
+ - ecr:BatchGetImage
+ - ecr:BatchCheckLayerAvailability
+
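+# a single rule that expires untagged images 365 days after they were pushed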
+lifecycle_policy:
+ rules:
+ - rulePriority: 1
+ description: new policy
+ selection:
+ tagStatus: untagged
+ countType: sinceImagePushed
+ countUnit: days
+ countNumber: 365
+ action:
+ type: expire
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/tasks/main.yml
new file mode 100644
index 00000000..f92ba965
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_ecr/tasks/main.yml
@@ -0,0 +1,541 @@
+---
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+
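+ # module_defaults applies these connection parameters to every AWS
+ # module called inside the block below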
+ block:
+ - set_fact:
+ ecr_name: '{{ resource_prefix }}-ecr'
+
+ - name: When creating a repository in check mode
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change and create
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - result.created
+
+
+ - name: When specifying a registry that is inaccessible
+ ecs_ecr:
+ registry_id: 999999999999
+ name: '{{ ecr_name }}'
+ register: result
+ ignore_errors: true
+
+ - name: it should fail with an AccessDeniedException
+ assert:
+ that:
+ - result is failed
+ - '"AccessDeniedException" in result.msg'
+
+
+ - name: When creating a repository
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+
+ - name: it should change and create
+ assert:
+ that:
+ - result is changed
+ - result.created
+
+ - name: it should have been configured as mutable by default
+ assert:
+ that:
+ - result.repository.imageTagMutability == "MUTABLE"
+
+
+ - name: When creating a repository that already exists in check mode
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+ check_mode: yes
+
+ - name: it should not skip, should not change
+ assert:
+ that:
+ - result is not skipped
+ - result is not changed
+
+
+ - name: When creating a repository that already exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+
+
+ - name: When in check mode, and deleting a policy that does not exist
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_policy: yes
+ register: result
+ check_mode: yes
+
+ - name: it should not skip and not change
+ assert:
+ that:
+ - result is not skipped
+ - result is not changed
+
+
+ - name: When in check mode, setting policy on a repository that has no policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: '{{ policy }}'
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change and not create
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+
+
+ - name: When setting policy on a repository that has no policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: '{{ policy }}'
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+
+
+ - name: When in check mode, and deleting a policy that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ delete_policy: yes
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change but not create, have deprecations
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+ - result.deprecations
+
+
+ - name: When in check mode, and purging a policy that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_policy: yes
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change but not create, no deprecations
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+ - result.deprecations is not defined
+
+
+ - name: When purging a policy that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_policy: yes
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+
+
+ - name: When setting a policy as a string
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: '{{ policy | to_json }}'
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+
+
+ - name: When setting a policy to its current value
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: '{{ policy }}'
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+
+ - name: When omitting policy on a repository that has a policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+
+ - name: When specifying both policy and purge_policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: '{{ policy }}'
+ purge_policy: yes
+ register: result
+ ignore_errors: true
+
+ - name: it should fail
+ assert:
+ that:
+ - result is failed
+
+
+ - name: When specifying invalid JSON for policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ policy: "Ceci n'est pas une JSON"
+ register: result
+ ignore_errors: true
+
+ - name: it should fail
+ assert:
+ that:
+ - result is failed
+
+
+ - name: When in check mode, and purging a lifecycle policy that does not exist
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_lifecycle_policy: yes
+ register: result
+ check_mode: yes
+
+ - name: it should not skip and not change
+ assert:
+ that:
+ - not result is skipped
+ - not result is changed
+
+
+ - name: When in check mode, setting lifecycle policy on a repository that has no policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: '{{ lifecycle_policy }}'
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change and not create
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+
+
+ - name: When setting lifecycle policy on a repository that has no policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: '{{ lifecycle_policy }}'
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+ - result.lifecycle_policy is defined
+ - result.lifecycle_policy.rules|length == 1
+
+
+ - name: When in check mode, and purging a lifecycle policy that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_lifecycle_policy: yes
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change but not create
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+
+
+ - name: When purging a lifecycle policy that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ purge_lifecycle_policy: yes
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+
+
+ - name: When setting a lifecycle policy as a string
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: '{{ lifecycle_policy | to_json }}'
+ register: result
+
+ - name: it should change and not create
+ assert:
+ that:
+ - result is changed
+ - not result.created
+
+
+ - name: When setting a lifecycle policy to its current value
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: '{{ lifecycle_policy }}'
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - not result is changed
+
+
+ - name: When omitting lifecycle policy on a repository that has a policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - not result is changed
+
+
+ - name: When specifying both lifecycle_policy and purge_lifecycle_policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: '{{ lifecycle_policy }}'
+ purge_lifecycle_policy: yes
+ register: result
+ ignore_errors: true
+
+ - name: it should fail
+ assert:
+ that:
+ - result is failed
+
+
+ - name: When specifying invalid JSON for lifecycle policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy: "Ceci n'est pas une JSON"
+ register: result
+ ignore_errors: true
+
+ - name: it should fail
+ assert:
+ that:
+ - result is failed
+
+
+ - name: When specifying an invalid document for lifecycle policy
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ lifecycle_policy:
+ rules:
+ - invalid: "Ceci n'est pas une rule"
+ register: result
+ ignore_errors: true
+
+ - name: it should fail
+ assert:
+ that:
+ - result is failed
+
+
+ - name: When in check mode, deleting a repository that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ state: absent
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change and not create
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - not result.created
+
+
+ - name: When deleting a repository that exists
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ state: absent
+ register: result
+
+ - name: it should change
+ assert:
+ that:
+ - result is changed
+
+
+ - name: When in check mode, deleting a repository that does not exist
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ state: absent
+ register: result
+ check_mode: yes
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not skipped
+ - result is not changed
+
+
+ - name: When deleting a repository that does not exist
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ state: absent
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+
+ - name: When creating an immutable repository
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ image_tag_mutability: immutable
+ register: result
+
+ - name: it should change and create
+ assert:
+ that:
+ - result is changed
+ - result.created
+
+ - name: it should have been configured as immutable
+ assert:
+ that:
+ - result.repository.imageTagMutability == "IMMUTABLE"
+
+
+ - name: When configuring an existing immutable repository to be mutable in check mode
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ image_tag_mutability: mutable
+ register: result
+ check_mode: yes
+
+ - name: it should skip, change and be configured mutable
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+ - result.repository.imageTagMutability == "MUTABLE"
+
+ - name: When configuring an existing immutable repository to be mutable
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ image_tag_mutability: mutable
+ register: result
+
+ - name: it should change and be configured mutable
+ assert:
+ that:
+ - result is changed
+ - result.repository.imageTagMutability == "MUTABLE"
+
+ - name: When configuring an already mutable repository to be mutable
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ image_tag_mutability: mutable
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+
+ - name: enable scan on push in check mode
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ scan_on_push: yes
+ check_mode: yes
+ register: result
+
+ - name: it should change
+ assert:
+ that:
+ - result is skipped
+ - result is changed
+
+ - name: enable scan on push
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ scan_on_push: yes
+ register: result
+
+ - name: it should change
+ assert:
+ that:
+ - result is changed
+ - result.repository.imageScanningConfiguration.scanOnPush
+
+ - name: verify enable scan on push
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ scan_on_push: yes
+ register: result
+
+ - name: it should not change
+ assert:
+ that:
+ - result is not changed
+ - result.repository.imageScanningConfiguration.scanOnPush
+
+ - name: disable scan on push
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ scan_on_push: no
+ register: result
+
+ - name: it should change
+ assert:
+ that:
+ - result is changed
+ - not result.repository.imageScanningConfiguration.scanOnPush
+
+ always:
+
+ - name: Delete lingering ECR repository
+ ecs_ecr:
+ name: '{{ ecr_name }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/aliases
new file mode 100644
index 00000000..fe51f28b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+ecs_tag
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml
new file mode 100644
index 00000000..78a11837
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/ecs_tag/tasks/main.yml
@@ -0,0 +1,322 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+ collections:
+ - amazon.aws
+ block:
+ - name: create ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: present
+ register: cluster_info
+
+ - name: create ecs_taskdefinition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ state: present
+ register: ecs_taskdefinition_creation
+
+ # Even after deleting the cluster and recreating it with a different
+ # name, a previous service stuck in a draining state can prevent the
+ # new service from starting. Check the service info and sleep if the
+ # service does not report as inactive.
+
+ - name: check if service is still running from a previous task
+ ecs_service_info:
+ service: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ details: yes
+ register: ecs_service_info_results
+
+ - name: delay if the service was not inactive
+ pause:
+ seconds: 30
+ when:
+ - ecs_service_info_results.services | length > 0
+ - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
+
+ - name: create ecs_service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: present
+ register: ecs_service_creation
+
+ - name: ecs_service up
+ assert:
+ that:
+ - ecs_service_creation.changed
+
+ # Test tagging cluster resource
+
+ - name: cluster tags - Add tags to cluster
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{resource_prefix}}"
+ resource_type: cluster
+ state: present
+ tags:
+ Name: "{{ resource_prefix }}"
+ another: foobar
+ register: taglist
+
+ - name: cluster tags - tags should be there
+ assert:
+ that:
+ - taglist.changed == true
+ - taglist.added_tags.Name == "{{ resource_prefix }}"
+ - taglist.added_tags.another == "foobar"
+
+ - name: cluster tags - Add tags to cluster again
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{resource_prefix}}"
+ resource_type: cluster
+ state: present
+ tags:
+ Name: "{{ resource_prefix }}"
+ another: foobar
+ register: taglist
+
+ - name: cluster tags - No change after adding again
+ assert:
+ that:
+ - taglist.changed == false
+
+ - name: cluster tags - List tags
+ ecs_tag:
+ cluster_name: "{{ resource_prefix }}"
+ resource: "{{ resource_prefix }}"
+ resource_type: cluster
+ state: list
+ register: taglist
+
+ - name: cluster tags - should have 2 tags
+ assert:
+ that:
+ - taglist.tags|list|length == 2
+ - taglist.failed == false
+ - taglist.changed == false
+
+ - name: cluster tags - remove tag another
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{resource_prefix}}"
+ resource_type: cluster
+ state: absent
+ tags:
+ another:
+ register: taglist
+
+ - name: cluster tags - tag another should be gone
+ assert:
+ that:
+ - taglist.changed == true
+ - '"another" not in taglist.tags'
+
+ - name: cluster tags - remove tag when not present
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{resource_prefix}}"
+ resource_type: cluster
+ state: absent
+ tags:
+ temp:
+ temp_two:
+ register: taglist
+ ignore_errors: yes
+
+ - name: cluster tags - check that the task did not fail but made no change
+ assert:
+ that:
+ - taglist.failed == false
+ - taglist.changed == false
+
+
+ - name: cluster tags - invalid cluster name
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}-foo"
+ resource: "{{resource_prefix}}-foo"
+ resource_type: cluster
+ state: absent
+ tags:
+ temp:
+ temp_two:
+ register: taglist
+ ignore_errors: yes
+
+ - name: cluster tags - Make sure an invalid cluster name is handled
+ assert:
+ that:
+ - taglist.failed == true
+ - taglist.changed == false
+ - 'taglist.msg is regex("Failed to find cluster ansible-test-.*-foo")'
+
+ # Test tagging service resource
+
+ - name: services tags - Add name tag
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_service_creation.service.serviceName}}"
+ resource_type: service
+ state: present
+ tags:
+ Name: "service-{{resource_prefix}}"
+ register: taglist
+
+ - name: service tag - name tags should be there
+ assert:
+ that:
+ - taglist.changed == true
+ - taglist.added_tags.Name == "service-{{ resource_prefix }}"
+ - taglist.tags.Name == "service-{{ resource_prefix }}"
+
+ - name: services tags - Add name tag again - see no change
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_service_creation.service.serviceName}}"
+ resource_type: service
+ state: present
+ tags:
+ Name: "service-{{resource_prefix}}"
+ register: taglist
+
+ - name: service tag - test that adding the tag twice has no effect
+ assert:
+ that:
+ - taglist.changed == false
+ - taglist.tags.Name == "service-{{ resource_prefix }}"
+
+ - name: service tags - remove service tags
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_service_creation.service.serviceName}}"
+ resource_type: service
+ state: absent
+ tags:
+ Name:
+ register: taglist
+
+ - name: service tags - all tags gone
+ assert:
+ that:
+ - taglist.tags|list|length == 0
+ - taglist.changed == true
+ - '"Name" not in taglist.tags'
+
+
+ # Test tagging task_definition resource
+
+ - name: task_definition tags - Add name tag
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
+ resource_type: task_definition
+ state: present
+ tags:
+ Name: "task_definition-{{resource_prefix}}"
+ register: taglist
+
+ - name: task_definition tag - name tags should be there
+ assert:
+ that:
+ - taglist.changed == true
+ - taglist.added_tags.Name == "task_definition-{{ resource_prefix }}"
+ - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
+
+ - name: task_definition tags - Add name tag again - see no change
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
+ resource_type: task_definition
+ state: present
+ tags:
+ Name: "task_definition-{{resource_prefix}}"
+ register: taglist
+
+ - name: task_definition tag - test that adding the tag twice has no effect
+ assert:
+ that:
+ - taglist.changed == false
+ - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
+
+ - name: task_definition tags - retrieve all tags on a task_definition
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
+ resource_type: task_definition
+ state: list
+ register: taglist
+
+ - name: task_definition tags - should have 1 tag
+ assert:
+ that:
+ - taglist.tags|list|length == 1
+ - taglist.failed == false
+ - taglist.changed == false
+
+ - name: task_definition tags - remove task_definition tags
+ ecs_tag:
+ cluster_name: "{{resource_prefix}}"
+ resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
+ resource_type: task_definition
+ state: absent
+ tags:
+ Name:
+ register: taglist
+
+ - name: task_definition tags - all tags gone
+ assert:
+ that:
+ - taglist.tags|list|length == 0
+ - taglist.changed == true
+ - '"Name" not in taglist.tags'
+
+ always:
+ - name: scale down ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 0
+ state: present
+ ignore_errors: yes
+
+ - name: pause to wait for scale down
+ pause:
+ seconds: 30
+
+ - name: remove ecs service
+ ecs_service:
+ name: "{{ resource_prefix }}"
+ cluster: "{{ resource_prefix }}"
+ task_definition: "{{ resource_prefix }}"
+ desired_count: 1
+ state: absent
+ ignore_errors: yes
+
+ - name: remove ecs task definition
+ ecs_taskdefinition:
+ containers:
+ - name: my_container
+ image: ubuntu
+ memory: 128
+ family: "{{ resource_prefix }}"
+ revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: remove ecs cluster
+ ecs_cluster:
+ name: "{{ resource_prefix }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/aliases
new file mode 100644
index 00000000..3f9eda99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+unsupported
+efs_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/full_test.yml
new file mode 100644
index 00000000..e15e5aa4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/full_test.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ connection: local
+# environment: "{{ ansible_test.environment }}"
+
+ vars:
+ resource_prefix: 'ansible-testing'
+
+ roles:
+ - efs
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/roles/efs/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/roles/efs/tasks/main.yml
new file mode 100644
index 00000000..c17378fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/roles/efs/tasks/main.yml
@@ -0,0 +1,331 @@
+---
+- name: efs tests
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: true
+
+ - name: Create VPC for testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ <<: *aws_connection_info
+ register: testing_vpc
+
+ - name: Create subnet in zone A for testing
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ <<: *aws_connection_info
+ register: testing_subnet_a
+
+ - name: Create subnet in zone B for testing
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.33.0/24
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ <<: *aws_connection_info
+ register: testing_subnet_b
+
+ - name: Get default security group id for vpc
+ ec2_group_info:
+ <<: *aws_connection_info
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ register: sg_facts
+
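+ # a newly created VPC has only its default security group, so the first
+ # entry returned is the default SG id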
+ - set_fact:
+ vpc_default_sg_id: "{{sg_facts.security_groups[0].group_id}}"
+
+
+ # ============================================================
+ - name: Create EFS for testing
+ efs:
+ <<: *aws_connection_info
+ state: present
+ name: "{{ resource_prefix }}-test-efs"
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ Purpose: file-storage
+ targets:
+ - subnet_id: "{{testing_subnet_a.subnet.id}}"
+ - subnet_id: "{{testing_subnet_b.subnet.id}}"
+ throughput_mode: 'bursting'
+ register: created_efs
+
+ # ============================================================
+ - name: Get all EFS Facts
+ efs_info:
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that:
+ - (efs_result.efs | length) >= 1
+
+ # ============================================================
+ - name: Get EFS by creation token
+ efs_info:
+ name: "{{ resource_prefix }}-test-efs"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - set_fact:
+ efs_result_assertions:
+ - efs_result is not changed
+ - (efs_result.efs | length) == 1
+ - efs_result.efs[0].creation_token == "{{ resource_prefix }}-test-efs"
+ - efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
+ - efs_result.efs[0].number_of_mount_targets == 2
+ - (efs_result.efs[0].mount_targets | length) == 2
+ - efs_result.efs[0].name == "{{ resource_prefix }}-test-tag"
+ - efs_result.efs[0].tags.Name == "{{ resource_prefix }}-test-tag"
+ - efs_result.efs[0].tags.Purpose == "file-storage"
+ - efs_result.efs[0].encrypted == false
+ - efs_result.efs[0].life_cycle_state == "available"
+ - efs_result.efs[0].performance_mode == "generalPurpose"
+ - efs_result.efs[0].throughput_mode == "bursting"
+ - efs_result.efs[0].mount_targets[0].security_groups[0] == vpc_default_sg_id
+ - efs_result.efs[0].mount_targets[1].security_groups[0] == vpc_default_sg_id
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Get EFS by id
+ efs_info:
+ id: "{{created_efs.efs.file_system_id}}"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Get EFS by tag
+ efs_info:
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Get EFS by target (subnet_id)
+ efs_info:
+ targets:
+ - "{{testing_subnet_a.subnet.id}}"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Get EFS by target (security_group_id)
+ efs_info:
+ targets:
+ - "{{vpc_default_sg_id}}"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Get EFS by tag and target
+ efs_info:
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ targets:
+ - "{{testing_subnet_a.subnet.id}}"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ # Not checking efs_result.efs["throughput_mode"] here, as an EFS whose
+ # "life_cycle_state" is "updating" might still report the previous values
+ - name: Update EFS to use provisioned throughput_mode
+ efs:
+ <<: *aws_connection_info
+ state: present
+ name: "{{ resource_prefix }}-test-efs"
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ Purpose: file-storage
+ targets:
+ - subnet_id: "{{testing_subnet_a.subnet.id}}"
+ - subnet_id: "{{testing_subnet_b.subnet.id}}"
+ throughput_mode: 'provisioned'
+ provisioned_throughput_in_mibps: 5.0
+ register: efs_result
+
+ - assert:
+ that:
+ - efs_result is changed
+
+ # ============================================================
+ - name: Apply the same provisioned_throughput_in_mibps value to the EFS
+ efs:
+ <<: *aws_connection_info
+ state: present
+ name: "{{ resource_prefix }}-test-efs"
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ Purpose: file-storage
+ targets:
+ - subnet_id: "{{testing_subnet_a.subnet.id}}"
+ - subnet_id: "{{testing_subnet_b.subnet.id}}"
+ throughput_mode: 'provisioned'
+ provisioned_throughput_in_mibps: 5.0
+ register: efs_result
+
+ - assert:
+ that:
+ - efs_result is not changed
+ - efs_result.efs["throughput_mode"] == "provisioned"
+ - efs_result.efs["provisioned_throughput_in_mibps"] == 5.0
+
+ # ============================================================
+ - name: Set a new provisioned_throughput_in_mibps value on the EFS
+ efs:
+ <<: *aws_connection_info
+ state: present
+ name: "{{ resource_prefix }}-test-efs"
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ Purpose: file-storage
+ targets:
+ - subnet_id: "{{testing_subnet_a.subnet.id}}"
+ - subnet_id: "{{testing_subnet_b.subnet.id}}"
+ throughput_mode: 'provisioned'
+ provisioned_throughput_in_mibps: 8.0
+ register: efs_result
+
+ - assert:
+ that:
+ - efs_result is changed
+ - efs_result.efs["provisioned_throughput_in_mibps"] == 8.0
+
+ # ============================================================
+ - name: Check new facts with provisioned mode
+ efs_info:
+ name: "{{ resource_prefix }}-test-efs"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - set_fact:
+ efs_result_assertions:
+ - efs_result is not changed
+ - efs_result.efs[0].throughput_mode == "provisioned"
+ - efs_result.efs[0].provisioned_throughput_in_mibps == 8.0
+ - (efs_result.efs | length) == 1
+ - efs_result.efs[0].creation_token == "{{ resource_prefix }}-test-efs"
+ - efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
+
+ - assert:
+ that: "{{efs_result_assertions}}"
+
+ # ============================================================
+ - name: Query unknown EFS by tag
+ efs_info:
+ tags:
+ Name: "{{ resource_prefix }}-unknown"
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that:
+ - efs_result is not changed
+ - (efs_result.efs | length) == 0
+
+ - name: Query unknown EFS by target
+ efs_info:
+ targets:
+ - sg-00000000000
+ <<: *aws_connection_info
+ register: efs_result
+
+ - assert:
+ that:
+ - efs_result is not changed
+ - (efs_result.efs | length) == 0
+
+ # ============================================================
+ always:
+ - name: Delete EFS used for tests
+ efs:
+ <<: *aws_connection_info
+ state: absent
+ name: "{{ resource_prefix }}-test-efs"
+ tags:
+ Name: "{{ resource_prefix }}-test-tag"
+ Purpose: file-storage
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: Remove test subnet in zone A
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.32.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: Remove test subnet in zone B
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.22.33.0/24
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/version_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/version_fail.yml
new file mode 100644
index 00000000..49c94ae3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/playbooks/version_fail.yml
@@ -0,0 +1,32 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ vars:
+ resource_prefix: 'ansible-testing'
+
+ tasks:
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: True
+
+ - name: create efs with provisioned_throughput options (fails gracefully)
+ efs:
+ state: present
+ name: "{{ resource_prefix }}-efs"
+ throughput_mode: 'provisioned'
+ provisioned_throughput_in_mibps: 8.0
+ <<: *aws_connection_info
+ register: efs_provisioned_throughput_creation
+ ignore_errors: yes
+
+ - name: check that a graceful error message is returned when creating with throughput_mode on an old botocore
+ assert:
+ that:
+ - efs_provisioned_throughput_creation.failed
+ - 'efs_provisioned_throughput_creation.msg == "throughput_mode parameter requires botocore >= 1.10.57"'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/runme.sh
new file mode 100755
index 00000000..e4f214b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/efs/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+# Test graceful failure for older versions of botocore
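+# virtualenv.sh is assumed to come from the shared test harness; each time it
+# is sourced it creates and activates a fresh virtualenv for the pip pins below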
+source virtualenv.sh
+pip install 'botocore<1.10.57' boto3
+ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
+
+# Run full test suite
+source virtualenv.sh
+pip install 'botocore>=1.10.57' boto3
+ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
new file mode 100644
index 00000000..8100bd55
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# load balancer and target group names have to be less than 32 characters
+# the 8-digit identifier at the end of resource_prefix makes it possible to
+# tell which test run created a resource and lets tests run in parallel
+alb_name: "my-alb-{{ resource_prefix | regex_search('([0-9]+)$') }}"
+tg_name: "my-tg-{{ resource_prefix | regex_search('([0-9]+)$') }}"
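+# e.g. a resource_prefix ending in "12345678" yields "my-alb-12345678" and
+# "my-tg-12345678", both well under the 32 character limit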
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/full_test.yml
new file mode 100644
index 00000000..82aabf1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/full_test.yml
@@ -0,0 +1,283 @@
+- name: elb_application_lb full_test
+ collections:
+ - amazon.aws
+ block:
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ awscli_connection_info:
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ no_log: true
+ - name: create VPC
+ ec2_vpc_net:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr_block: 10.228.228.0/22
+ name: '{{ resource_prefix }}_vpc'
+ state: present
+ register: vpc
+ - name: create internet gateway
+ ec2_vpc_igw:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ tags:
+ Name: '{{ resource_prefix }}'
+ register: igw
+ - name: create public subnet
+ ec2_vpc_subnet:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr: '{{ item.cidr }}'
+ az: '{{ aws_region }}{{ item.az }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ tags:
+ Public: '{{ item.public|string }}'
+ Name: '{{ item.public|ternary(''public'', ''private'') }}-{{ item.az }}'
+ with_items:
+ - cidr: 10.228.228.0/24
+ az: a
+ public: 'True'
+ - cidr: 10.228.229.0/24
+ az: b
+ public: 'True'
+ - cidr: 10.228.230.0/24
+ az: a
+ public: 'False'
+ - cidr: 10.228.231.0/24
+ az: b
+ public: 'False'
+ register: subnets
+ - ec2_vpc_subnet_info:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ filters:
+ vpc-id: '{{ vpc.vpc.id }}'
+ register: vpc_subnets
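+ # the json_query expressions below split the subnet ids into public and
+ # private lists based on the Public tag applied at creation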
+ - name: create list of subnet ids
+ set_fact:
+ alb_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public == `True`].id'') }}'
+ private_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public != `True`].id'') }}'
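+ # json_query uses JMESPath: the expressions split the subnet ids by the
+ # value of the Public tag (a string, since AWS tag values are strings)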
+ - name: create a route table
+ ec2_vpc_route_table:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: igw-route
+ Created: '{{ resource_prefix }}'
+ subnets: '{{ alb_subnets + private_subnets }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ register: route_table
+ - ec2_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ resource_prefix }}'
+ description: security group for Ansible ALB integration tests
+ state: present
+ vpc_id: '{{ vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ register: sec_group
+ - name: create a target group for testing
+ elb_target_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ register: tg
+ - name: create privatekey for testing
+ community.crypto.openssl_privatekey:
+ path: ./ansible_alb_test.pem
+ size: 2048
+ - name: create csr for cert
+ community.crypto.openssl_csr:
+ path: ./ansible_alb_test.csr
+ privatekey_path: ./ansible_alb_test.pem
+ C: US
+ ST: AnyPrincipality
+ L: AnyTown
+ O: AnsibleIntegrationTest
+ OU: Test
+ CN: ansible-alb-test.example.com
+ - name: create certificate
+ community.crypto.openssl_certificate:
+ path: ./ansible_alb_test.crt
+ privatekey_path: ./ansible_alb_test.pem
+ csr_path: ./ansible_alb_test.csr
+ provider: selfsigned
+ - name: upload server cert to iam
+ iam_cert:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: present
+ cert: ./ansible_alb_test.crt
+ key: ./ansible_alb_test.pem
+ register: cert_upload
+ - name: register certificate arn to acm_arn fact
+ set_fact:
+ cert_arn: '{{ cert_upload.arn }}'
+ - include_tasks: test_alb_bad_listener_options.yml
+ - include_tasks: test_alb_tags.yml
+ - include_tasks: test_creating_alb.yml
+ - include_tasks: test_alb_with_asg.yml
+ - include_tasks: test_modifying_alb_listeners.yml
+ - include_tasks: test_deleting_alb.yml
+ - include_tasks: test_multiple_actions.yml
+ always:
+ - name: destroy ALB
+ elb_application_lb:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ ignore_errors: true
+ - name: destroy target group if it was created
+ elb_target_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ register: remove_tg
+ retries: 5
+ delay: 3
+ until: remove_tg is success
+ when: tg is defined
+ ignore_errors: true
+ - name: destroy acm certificate
+ iam_cert:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: absent
+ register: remove_cert
+ retries: 5
+ delay: 3
+ until: remove_cert is success
+ when: cert_arn is defined
+ ignore_errors: true
+ - name: destroy sec group
+ ec2_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ sec_group.group_name }}'
+ description: security group for Ansible ALB integration tests
+ state: absent
+ vpc_id: '{{ vpc.vpc.id }}'
+ register: remove_sg
+ retries: 10
+ delay: 5
+ until: remove_sg is success
+ ignore_errors: true
+ - name: remove route table
+ ec2_vpc_route_table:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ route_table_id: '{{ route_table.route_table.route_table_id }}'
+ lookup: id
+ state: absent
+ register: remove_rt
+ retries: 10
+ delay: 5
+ until: remove_rt is success
+ ignore_errors: true
+ - name: destroy subnets
+ ec2_vpc_subnet:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr: '{{ item.cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ register: remove_subnet
+ retries: 10
+ delay: 5
+ until: remove_subnet is success
+ with_items:
+ - cidr: 10.228.228.0/24
+ - cidr: 10.228.229.0/24
+ - cidr: 10.228.230.0/24
+ - cidr: 10.228.231.0/24
+ ignore_errors: true
+ - name: destroy internet gateway
+ ec2_vpc_igw:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}'
+ state: absent
+ register: remove_igw
+ retries: 10
+ delay: 5
+ until: remove_igw is success
+ ignore_errors: true
+ - name: destroy VPC
+ ec2_vpc_net:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr_block: 10.228.228.0/22
+ name: '{{ resource_prefix }}_vpc'
+ state: absent
+ register: remove_vpc
+ retries: 10
+ delay: 5
+ until: remove_vpc is success
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
new file mode 100644
index 00000000..425469bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
@@ -0,0 +1,44 @@
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: virtualenv
+
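+# botocore added multiple-action support in 1.10.30; pin an older release
+# first so the graceful-failure path in multiple_actions_fail.yml is exercised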
+- pip:
+ name:
+ - 'botocore<1.10.30'
+ - boto3
+ - boto
+ - coverage<5
+ - cryptography
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: multiple_actions_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+
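+# upgrade botocore to 1.10.30 or later and run the remaining tests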
+- pip:
+ name:
+ - 'botocore>=1.10.30'
+ - boto3
+ - boto
+ - coverage<5
+ - cryptography
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: full_test.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml
new file mode 100644
index 00000000..56b6a194
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml
@@ -0,0 +1,277 @@
+- name: elb_application_lb multiple_actions_fail tests
+ collections:
+ - amazon.aws
+ block:
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ awscli_connection_info:
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ no_log: true
+ - name: create VPC
+ ec2_vpc_net:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr_block: 10.228.228.0/22
+ name: '{{ resource_prefix }}_vpc'
+ state: present
+ register: vpc
+ - name: create internet gateway
+ ec2_vpc_igw:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ tags:
+ Name: '{{ resource_prefix }}'
+ register: igw
+ - name: create public subnet
+ ec2_vpc_subnet:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr: '{{ item.cidr }}'
+ az: '{{ aws_region }}{{ item.az }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ tags:
+ Public: '{{ item.public|string }}'
+ Name: '{{ item.public|ternary(''public'', ''private'') }}-{{ item.az }}'
+ with_items:
+ - cidr: 10.228.228.0/24
+ az: a
+ public: 'True'
+ - cidr: 10.228.229.0/24
+ az: b
+ public: 'True'
+ - cidr: 10.228.230.0/24
+ az: a
+ public: 'False'
+ - cidr: 10.228.231.0/24
+ az: b
+ public: 'False'
+ register: subnets
+ - ec2_vpc_subnet_info:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ filters:
+ vpc-id: '{{ vpc.vpc.id }}'
+ register: vpc_subnets
+ - name: create list of subnet ids
+ set_fact:
+ alb_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public == `True`].id'') }}'
+ private_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public != `True`].id'') }}'
+ - name: create a route table
+ ec2_vpc_route_table:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: igw-route
+ Created: '{{ resource_prefix }}'
+ subnets: '{{ alb_subnets + private_subnets }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ register: route_table
+ - ec2_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ resource_prefix }}'
+ description: security group for Ansible ALB integration tests
+ state: present
+ vpc_id: '{{ vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ register: sec_group
+ - name: create a target group for testing
+ elb_target_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ register: tg
+ - name: create privatekey for testing
+ community.crypto.openssl_privatekey:
+ path: ./ansible_alb_test.pem
+ size: 2048
+ - name: create csr for cert
+ community.crypto.openssl_csr:
+ path: ./ansible_alb_test.csr
+ privatekey_path: ./ansible_alb_test.pem
+ C: US
+ ST: AnyPrincipality
+ L: AnyTown
+ O: AnsibleIntegrationTest
+ OU: Test
+ CN: ansible-alb-test.example.com
+ - name: create certificate
+ community.crypto.openssl_certificate:
+ path: ./ansible_alb_test.crt
+ privatekey_path: ./ansible_alb_test.pem
+ csr_path: ./ansible_alb_test.csr
+ provider: selfsigned
+ - name: upload server cert to iam
+ iam_cert:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: present
+ cert: ./ansible_alb_test.crt
+ key: ./ansible_alb_test.pem
+ register: cert_upload
+ - name: register certificate arn to acm_arn fact
+ set_fact:
+ cert_arn: '{{ cert_upload.arn }}'
+ - include_tasks: test_multiple_actions_fail.yml
+ always:
+ - name: destroy ALB
+ elb_application_lb:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ ignore_errors: true
+ - name: destroy target group if it was created
+ elb_target_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ register: remove_tg
+ retries: 10
+ delay: 5
+ until: remove_tg is success
+ when: tg is defined
+ ignore_errors: true
+ - name: destroy acm certificate
+ iam_cert:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ alb_name }}'
+ state: absent
+ register: remove_cert
+ retries: 10
+ delay: 5
+ until: remove_cert is success
+ when: cert_arn is defined
+ ignore_errors: true
+ - name: destroy sec group
+ ec2_group:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ name: '{{ sec_group.group_name }}'
+ description: security group for Ansible ALB integration tests
+ state: absent
+ vpc_id: '{{ vpc.vpc.id }}'
+ register: remove_sg
+ retries: 10
+ delay: 5
+ until: remove_sg is success
+ ignore_errors: true
+ - name: remove route table
+ ec2_vpc_route_table:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ route_table_id: '{{ route_table.route_table.route_table_id }}'
+ lookup: id
+ state: absent
+ register: remove_rt
+ retries: 10
+ delay: 5
+ until: remove_rt is success
+ ignore_errors: true
+ - name: destroy subnets
+ ec2_vpc_subnet:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr: '{{ item.cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ register: remove_subnet
+ retries: 10
+ delay: 5
+ until: remove_subnet is success
+ with_items:
+ - cidr: 10.228.228.0/24
+ - cidr: 10.228.229.0/24
+ - cidr: 10.228.230.0/24
+ - cidr: 10.228.231.0/24
+ ignore_errors: true
+ - name: destroy internet gateway
+ ec2_vpc_igw:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}'
+ state: absent
+ register: remove_igw
+ retries: 10
+ delay: 5
+ until: remove_igw is success
+ ignore_errors: true
+ - name: destroy VPC
+ ec2_vpc_net:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ cidr_block: 10.228.228.0/22
+ name: '{{ resource_prefix }}_vpc'
+ state: absent
+ register: remove_vpc
+ retries: 10
+ delay: 5
+ until: remove_vpc is success
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml
new file mode 100644
index 00000000..821ad36d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml
@@ -0,0 +1,71 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: test creating an ALB with invalid listener options
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: alb
+
+ - assert:
+ that:
+ - alb is failed
+ - alb.msg.startswith("'SslPolicy' is a required listener dict key when Protocol = HTTPS")
+
+ - name: test creating an ALB without providing required listener options
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Port: 80
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: alb
+
+ - assert:
+ that:
+ - alb is failed
+ - '"missing required arguments" in alb.msg'
+ - '"Protocol" in alb.msg'
+ - '"DefaultActions" in alb.msg'
+
+ - name: test creating an ALB providing an invalid listener option type
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: "bad type"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: alb
+
+ - assert:
+ that:
+ - alb is failed
+ - "'unable to convert to int' in alb.msg"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml
new file mode 100644
index 00000000..b7942fa7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml
@@ -0,0 +1,93 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create ALB with no listeners
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+
+ - name: re-create ALB with no listeners
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+
+ - name: add tags to ALB
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ tags:
+ created_by: "ALB test {{ resource_prefix }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}'
+
+ - name: remove tags from ALB
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ tags: {}
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - not alb.tags
+
+ - name: test idempotence
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ tags: {}
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - not alb.tags
+
+ - name: destroy ALB with no listeners
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml
new file mode 100644
index 00000000..de97d5bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml
@@ -0,0 +1,89 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - ec2_ami_info:
+ <<: *aws_connection_info
+ filters:
+ architecture: x86_64
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: "amzn-ami-hvm*"
+ owner-alias: "amazon"
+ register: amis
+
+ - set_fact:
+ latest_amazon_linux: "{{ amis.images | sort(attribute='creation_date') | last }}"
+
+ - ec2_asg:
+ <<: *aws_connection_info
+ state: absent
+ name: "{{ resource_prefix }}-webservers"
+ wait_timeout: 900
+
+ - ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ state: absent
+
+ - name: Create launch config for testing
+ ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ assign_public_ip: true
+ image_id: "{{ latest_amazon_linux.image_id }}"
+ security_groups: "{{ sec_group.group_id }}"
+ instance_type: t2.medium
+ user_data: |
+ #!/bin/bash
+ set -x
+ yum update -y --nogpgcheck
+ yum install -y --nogpgcheck httpd
+ echo "Hello Ansiblings!" >> /var/www/html/index.html
+ service httpd start
+ volumes:
+ - device_name: /dev/xvda
+ volume_size: 10
+ volume_type: gp2
+ delete_on_termination: true
+
+ - name: Create autoscaling group for app server fleet
+ ec2_asg:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-webservers"
+ vpc_zone_identifier: "{{ alb_subnets }}"
+ launch_config_name: "{{ resource_prefix }}-web-lcfg"
+ termination_policies:
+ - OldestLaunchConfiguration
+ - Default
+ health_check_period: 600
+ health_check_type: EC2
+ replace_all_instances: true
+ min_size: 0
+ max_size: 2
+ desired_capacity: 1
+ wait_for_instances: true
+ target_group_arns:
+ - "{{ tg.target_group_arn }}"
+
+ always:
+
+ - ec2_asg:
+ <<: *aws_connection_info
+ state: absent
+ name: "{{ resource_prefix }}-webservers"
+ wait_timeout: 900
+ ignore_errors: yes
+
+ - ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml
new file mode 100644
index 00000000..ee932d4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml
@@ -0,0 +1,52 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create ALB with a listener
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules|length == 1
+
+ - name: test idempotence creating ALB with a listener
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules|length == 1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml
new file mode 100644
index 00000000..34e278cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml
@@ -0,0 +1,52 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: destroy ALB with listener
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: absent
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ wait: yes
+ wait_timeout: 300
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+
+ - name: test idempotence
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: absent
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ wait: yes
+ wait_timeout: 300
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml
new file mode 100644
index 00000000..3e4765b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml
@@ -0,0 +1,240 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: add a rule to the listener
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - '/test'
+ Priority: '1'
+ Actions:
+ - TargetGroupName: "{{ tg_name }}"
+ Type: forward
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 2
+
+ - name: test replacing the rule with another rule of the same priority
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ purge_listeners: true
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - '/new'
+ Priority: '1'
+ Actions:
+ - TargetGroupName: "{{ tg_name }}"
+ Type: forward
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 2
+
+ - name: test the rule will not be removed without purge_listeners
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - alb.listeners[0].rules|length == 2
+
+ - name: test a rule can be added and other rules are not removed when purge_rules is no
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ purge_rules: no
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - '/new'
+ Priority: '2'
+ Actions:
+ - TargetGroupName: "{{ tg_name }}"
+ Type: forward
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 3
+
+ - name: add a rule that uses the host header condition to the listener
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ purge_rules: no
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules:
+ - Conditions:
+ - Field: host-header
+ Values:
+ - 'local.mydomain.com'
+ Priority: '3'
+ Actions:
+ - TargetGroupName: "{{ tg_name }}"
+ Type: forward
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 4
+ - alb | community.general.json_query("listeners[].rules[].conditions[].host_header_config.values[]") | length == 1
+
+ - name: test replacing the host-header rule with one that matches multiple host header values
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ purge_rules: no
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules:
+ - Conditions:
+ - Field: host-header
+ Values:
+ - 'local.mydomain.com'
+ - 'alternate.mydomain.com'
+ Priority: '3'
+ Actions:
+ - TargetGroupName: "{{ tg_name }}"
+ Type: forward
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 4
+ - alb | community.general.json_query("listeners[].rules[].conditions[].host_header_config.values[]") | length == 2
+
+ - name: remove the rule
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ purge_listeners: true
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Rules: []
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners[0].rules|length == 1
+
+ - name: remove listener from ALB
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners: []
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - not alb.listeners
+
+ - name: add the listener to the ALB
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners|length == 1
+ - alb.availability_zones|length == 2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml
new file mode 100644
index 00000000..6223270c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml
@@ -0,0 +1,467 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: register dummy OIDC config
+ set_fact:
+ AuthenticateOidcActionConfig:
+ AuthorizationEndpoint: "https://www.example.com/auth"
+ ClientId: "eeeeeeeeeeeeeeeeeeeeeeeeee"
+ ClientSecret: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+ Issuer: "https://www.example.com/issuer"
+ OnUnauthenticatedRequest: "authenticate"
+ Scope: "openid"
+ SessionCookieName: "AWSELBAuthSessionCookie"
+ SessionTimeout: 604800
+ TokenEndpoint: "https://www.example.com/token"
+ UserInfoEndpoint: "https://www.example.com/userinfo"
+ UseExistingClientSecret: true
+
+ - name: register fixed response action
+ set_fact:
+ FixedResponseActionConfig:
+ ContentType: "text/plain"
+ MessageBody: "This is the page you're looking for"
+ StatusCode: "200"
+
+ - name: register redirect action
+ set_fact:
+ RedirectActionConfig:
+ Host: "#{host}"
+ Path: "/example/redir" # or /#{path}
+ Port: "#{port}"
+ Protocol: "#{protocol}"
+ Query: "#{query}"
+ StatusCode: "HTTP_302" # or HTTP_301
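+ # the #{...} tokens are ELB redirect placeholders that AWS expands from
+ # the original request URI at redirect time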
+
+ - name: delete existing ALB to avoid target group association issues
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ state: absent
+ <<: *aws_connection_info
+ wait: yes
+ wait_timeout: 600
+
+ - name: cleanup tg to avoid target group association issues
+ elb_target_group:
+ name: "{{ tg_name }}"
+ protocol: http
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ <<: *aws_connection_info
+ register: cleanup_tg
+ retries: 5
+ delay: 3
+ until: cleanup_tg is success
+
+ - name: recreate a target group
+ elb_target_group:
+ name: "{{ tg_name }}"
+ protocol: http
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ <<: *aws_connection_info
+ register: tg
+
+ - name: create ALB with redirect DefaultAction
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: redirect
+ RedirectConfig: "{{ RedirectActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "redirect"
+
+ - name: test idempotence with redirect DefaultAction
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: redirect
+ RedirectConfig: "{{ RedirectActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "redirect"
+
+ - name: update ALB with fixed-response DefaultAction
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "fixed-response"
+
+ - name: test idempotence with fixed-response DefaultAction
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "fixed-response"
+
+ - name: test multiple non-default rules
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/forward-path/*"
+ Priority: 1
+ Actions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/redirect-path/*"
+ Priority: 2
+ Actions:
+ - Type: redirect
+ RedirectConfig: "{{ RedirectActionConfig }}"
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/fixed-response-path/"
+ Priority: 3
+ Actions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules|length == 4 # DefaultActions is counted as a rule
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "forward"
+ - alb.listeners[0].rules[1].actions|length == 1
+ - alb.listeners[0].rules[1].actions[0].type == "redirect"
+ - alb.listeners[0].rules[2].actions|length == 1
+ - alb.listeners[0].rules[2].actions[0].type == "fixed-response"
+
+ - name: test idempotence multiple non-default rules
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/forward-path/*"
+ Priority: 1
+ Actions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/redirect-path/*"
+ Priority: 2
+ Actions:
+ - Type: redirect
+ RedirectConfig: "{{ RedirectActionConfig }}"
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/fixed-response-path/"
+ Priority: 3
+ Actions:
+ - Type: fixed-response
+ FixedResponseConfig: "{{ FixedResponseActionConfig }}"
+ <<: *aws_connection_info
+ register: alb
+
+ - assert:
+ that:
+ - not alb.changed
+ - alb.listeners|length == 1
+ - alb.listeners[0].rules|length == 4 # DefaultActions is counted as a rule
+ - alb.listeners[0].rules[0].actions|length == 1
+ - alb.listeners[0].rules[0].actions[0].type == "forward"
+ - alb.listeners[0].rules[1].actions|length == 1
+ - alb.listeners[0].rules[1].actions[0].type == "redirect"
+ - alb.listeners[0].rules[2].actions|length == 1
+ - alb.listeners[0].rules[2].actions[0].type == "fixed-response"
+
+
+# - name: test creating ALB with a default listener with multiple actions
+# elb_application_lb:
+# name: "{{ alb_name }}"
+# subnets: "{{ alb_subnets }}"
+# security_groups: "{{ sec_group.group_id }}"
+# state: present
+# listeners:
+# - Protocol: HTTPS
+# Port: 443
+# DefaultActions:
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# Certificates:
+# - CertificateArn: "{{ cert_arn }}"
+# SslPolicy: ELBSecurityPolicy-2016-08
+# <<: *aws_connection_info
+# register: alb
+#
+# - assert:
+# that:
+# - alb.listeners|length == 1
+# - alb.listeners[0].rules[0].actions|length == 2
+#
+# - name: test changing order of actions
+# elb_application_lb:
+# name: "{{ alb_name }}"
+# subnets: "{{ alb_subnets }}"
+# security_groups: "{{ sec_group.group_id }}"
+# state: present
+# listeners:
+# - Protocol: HTTPS
+# Port: 443
+# DefaultActions:
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# Certificates:
+# - CertificateArn: "{{ cert_arn }}"
+# SslPolicy: ELBSecurityPolicy-2016-08
+# <<: *aws_connection_info
+# register: alb
+#
+# - assert:
+# that:
+# - not alb.changed
+# - alb.listeners|length == 1
+# - alb.listeners[0].rules[0].actions|length == 2
+#
+# - name: test non-default rule with multiple actions
+# elb_application_lb:
+# name: "{{ alb_name }}"
+# subnets: "{{ alb_subnets }}"
+# security_groups: "{{ sec_group.group_id }}"
+# state: present
+# listeners:
+# - Protocol: HTTPS
+# Port: 443
+# DefaultActions:
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# Certificates:
+# - CertificateArn: "{{ cert_arn }}"
+# SslPolicy: ELBSecurityPolicy-2016-08
+# Rules:
+# - Conditions:
+# - Field: path-pattern
+# Values:
+# - "*"
+# Priority: 1
+# Actions:
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# <<: *aws_connection_info
+# register: alb
+#
+# - assert:
+# that:
+# - alb.changed
+# - alb.listeners|length == 1
+# - alb.listeners[0].rules[0].actions|length == 2
+# - alb.listeners[0].rules[1].actions|length == 2
+#
+# - name: test idempotency non-default rule with multiple actions
+# elb_application_lb:
+# name: "{{ alb_name }}"
+# subnets: "{{ alb_subnets }}"
+# security_groups: "{{ sec_group.group_id }}"
+# state: present
+# listeners:
+# - Protocol: HTTPS
+# Port: 443
+# DefaultActions:
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# Certificates:
+# - CertificateArn: "{{ cert_arn }}"
+# SslPolicy: ELBSecurityPolicy-2016-08
+# Rules:
+# - Conditions:
+# - Field: path-pattern
+# Values:
+# - "*"
+# Priority: 1
+# Actions:
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# <<: *aws_connection_info
+# register: alb
+#
+# - assert:
+# that:
+# - not alb.changed
+# - alb.listeners|length == 1
+# - alb.listeners[0].rules[0].actions|length == 2
+# - alb.listeners[0].rules[1].actions|length == 2
+#
+# - name: test non-default rule action order change
+# elb_application_lb:
+# name: "{{ alb_name }}"
+# subnets: "{{ alb_subnets }}"
+# security_groups: "{{ sec_group.group_id }}"
+# state: present
+# listeners:
+# - Protocol: HTTPS
+# Port: 443
+# DefaultActions:
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# Certificates:
+# - CertificateArn: "{{ cert_arn }}"
+# SslPolicy: ELBSecurityPolicy-2016-08
+# Rules:
+# - Conditions:
+# - Field: path-pattern
+# Values:
+# - "*"
+# Priority: 1
+# Actions:
+# - Type: authenticate-oidc
+# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+# Order: 1
+# - Type: forward
+# TargetGroupName: "{{ tg_name }}"
+# Order: 2
+# <<: *aws_connection_info
+# register: alb
+#
+# - assert:
+# that:
+# - not alb.changed
+# - alb.listeners|length == 1
+# - alb.listeners[0].rules[0].actions|length == 2
+# - alb.listeners[0].rules[1].actions|length == 2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml
new file mode 100644
index 00000000..722002f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml
@@ -0,0 +1,53 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: register dummy OIDC config
+ set_fact:
+ AuthenticateOidcActionConfig:
+ AuthorizationEndpoint: "https://www.example.com/auth"
+ ClientId: "eeeeeeeeeeeeeeeeeeeeeeeeee"
+ ClientSecret: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+ Issuer: "https://www.example.com/issuer"
+ OnUnauthenticatedRequest: "authenticate"
+ Scope: "openid"
+ SessionCookieName: "AWSELBAuthSessionCookie"
+ SessionTimeout: 604800
+ TokenEndpoint: "https://www.example.com/token"
+ UserInfoEndpoint: "https://www.example.com/userinfo"
+
+ - name: create ALB with multiple DefaultActions
+ elb_application_lb:
+ name: "{{ alb_name }}"
+ subnets: "{{ alb_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ Order: 2
+ - Type: authenticate-oidc
+ AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
+ Order: 1
+ Certificates:
+ - CertificateArn: "{{ cert_arn }}"
+ SslPolicy: ELBSecurityPolicy-2016-08
+ <<: *aws_connection_info
+ register: alb
+ ignore_errors: yes
+
+ - name: check for a graceful failure message
+ assert:
+ that:
+ - alb.failed
+ - 'alb.msg == "installed version of botocore does not support multiple actions, please upgrade botocore to version 1.10.30 or higher"'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml
new file mode 100644
index 00000000..76164523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for elb_classic_lb
+tag_prefix: '{{ resource_prefix }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
new file mode 100644
index 00000000..2b368c6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
@@ -0,0 +1,425 @@
+---
+# __Test Info__
+# Create a self-signed cert and upload it to AWS
+# http://www.akadia.com/services/ssh_test_certificate.html
+# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
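+#
+# a minimal sketch of those manual steps (hypothetical file and cert names):
+#   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+#     -keyout elb-test.key -out elb-test.crt -subj "/CN=elb-test.example.com"
+#   aws iam upload-server-certificate --server-certificate-name elb-test \
+#     --certificate-body file://elb-test.crt --private-key file://elb-test.key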
+
+# __Test Outline__
+#
+# __elb_classic_lb__
+# create test elb with listeners and certificate
+# change AZs
+# change listeners
+# remove listeners
+# remove elb
+
+# __ec2-common__
+# test with no parameters
+# test with only name
+# test invalid region parameter
+# test valid region parameter
+# test invalid ec2_url parameter
+# test valid ec2_url parameter
+# test credentials from environment
+# test credential parameters
+
+- block:
+
+ # ============================================================
+ # create test elb with listeners, certificate, and health check
+
+ - name: Create ELB
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ - protocol: http
+ load_balancer_port: 8080
+ instance_port: 8080
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/index.html"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ register: info
+
+ - assert:
+ that:
+ - 'info.changed'
+ - 'info.elb.status == "created"'
+ - '"{{ ec2_region }}a" in info.elb.zones'
+ - '"{{ ec2_region }}b" in info.elb.zones'
+ - 'info.elb.health_check.healthy_threshold == 10'
+ - 'info.elb.health_check.interval == 30'
+ - 'info.elb.health_check.target == "HTTP:80/index.html"'
+ - 'info.elb.health_check.timeout == 5'
+ - 'info.elb.health_check.unhealthy_threshold == 2'
+ - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
+ - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
+
+ # ============================================================
+
+ # checking that the ports respond would be ideal, but we are at the
+ # mercy of AWS to start things in a timely manner
+
+ #- name: check to make sure 80 is listening
+ # wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
+ # register: result
+
+ #- name: assert can connect to port#
+ # assert: 'result.state == "started"'
+
+ #- name: check to make sure 443 is listening
+ # wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
+ # register: result
+
+ #- name: assert can connect to port#
+ # assert: 'result.state == "started"'
+
+ # ============================================================
+
+ # Change AZs
+
+ - name: Change AZs
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/index.html"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ register: info
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - 'info.elb.zones[0] == "{{ ec2_region }}c"'
+
+ # ============================================================
+
+ # Update AZs
+
+ - name: Update AZs
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+ register: info
+
+ - assert:
+ that:
+ - 'info.changed'
+ - 'info.elb.status == "ok"'
+ - '"{{ ec2_region }}a" in info.elb.zones'
+ - '"{{ ec2_region }}b" in info.elb.zones'
+ - '"{{ ec2_region }}c" in info.elb.zones'
+
+
+ # ============================================================
+
+ # Purge Listeners
+
+ - name: Purge Listeners
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 81
+ purge_listeners: yes
+ register: info
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
+ - 'info.elb.listeners|length == 1'
+
+
+
+ # ============================================================
+
+ # add Listeners
+
+ - name: Add Listeners
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 8081
+ instance_port: 8081
+ purge_listeners: no
+ register: info
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
+ - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
+ - 'info.elb.listeners|length == 2'
+
+
+ # ============================================================
+
+ - name: test with no parameters
+ elb_classic_lb:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("missing required arguments: ")'
+
+
+
+ # ============================================================
+ - name: test with only name
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with only name
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: state"'
+
+
+ # ============================================================
+ - name: test invalid region parameter
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: 'asdf querty 1234'
+ state: present
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ register: result
+ ignore_errors: true
+
+ - name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")'
+
+
+ # ============================================================
+ - name: test valid region parameter
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ register: result
+ ignore_errors: true
+
+ - name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+
+ # ============================================================
+
+ - name: test invalid ec2_url parameter
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+ - name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+
+ # ============================================================
+ - name: test valid ec2_url parameter
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ environment:
+ EC2_URL: '{{ ec2_url }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+
+ # ============================================================
+ - name: test credentials from environment
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ environment:
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+ - name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"InvalidClientTokenId" in result.exception'
+
+
+ # ============================================================
+ - name: test credential parameters
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: present
+ zones:
+ - "{{ ec2_region }}a"
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ register: result
+ ignore_errors: true
+
+ - name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"No handler was ready to authenticate. 1 handlers were checked." in result.msg'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer completely
+ elb_classic_lb:
+ name: "{{ tag_prefix }}"
+ region: "{{ ec2_region }}"
+ state: absent
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ security_token: "{{ security_token }}"
+ register: result
+
+ - name: assert the load balancer was removed
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.elb.name == "{{ tag_prefix }}"'
+ - 'result.elb.status == "deleted"'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/vars/main.yml
new file mode 100644
index 00000000..79194af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_classic_lb/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for test_ec2_elb_lb
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/defaults/main.yml
new file mode 100644
index 00000000..5ee3fa45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Load balancer and target group names have to be less than 32 characters.
+# The 8-digit identifier at the end of resource_prefix helps determine during
+# which test something was created, and allows tests to be run in parallel.
+nlb_name: "my-nlb-{{ resource_prefix | regex_search('([0-9]+)$') }}"
+tg_name: "my-tg-{{ resource_prefix | regex_search('([0-9]+)$') }}"
+tg_tcpudp_name: "my-tg-tcpudp-{{ resource_prefix | regex_search('([0-9]+)$') }}"
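+#
+# Illustration (hypothetical prefix): if resource_prefix were
+# "ansible-test-12345678", regex_search('([0-9]+)$') would return
+# "12345678", so nlb_name would render as "my-nlb-12345678".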
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/cert.pem b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/cert.pem
new file mode 100644
index 00000000..81df3a6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/cert.pem
@@ -0,0 +1,32 @@
+-----BEGIN CERTIFICATE-----
+MIIFiTCCA3GgAwIBAgIJAKE+XLUKdWtPMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQxFDASBgNVBAMMC2V4YW1wbGUuY29tMB4XDTE5MDYxNDEx
+MzIzM1oXDTIwMDYxMzExMzIzM1owWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNv
+bWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEUMBIG
+A1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDOFNh5eQ6+9tYvtzjrqvFDzPoXmZuOFeqFS7iBH4gLorvmQQIQcVEAH7O+tkhW
+Z8+6PgvXXd43GFtEbY8jZoBQwupl7lIdzywFRoyZYkREXodmDixkwxlPvUWdrb3r
+ZDRON6qIbX8LrzTPD1+JL4Rtkgr1RTlLrHT3ABEqEV1fQODOdbRd7rq6fmqwPlbl
+zS5kN3RPFuJVDZrnCPcEMOA3QftQgDTzyOlZJYWDZsJxel7H/O9qZjPBTitNJxg1
+ierPaIXT6u6CdWA0A7t3Knyn2+vcyvemjsbQg9v/U5zKR3h+6F0slqgOT/ZnrEos
+AzxdeaA5POJFy6xCHZiVgsE7OVaPB9imWrrAYbKsHVLP2rdlhnGZQnnebmTYCll5
+SvXWCIr5vp4i1qxIa95QBU/xmEY6kTy9GjAOSmYXj7UnwnBZwgEop0yUdBMb4s9G
+x8S6Yxaj1DZVyiyrzInBri9lqabkPLPQNaK7wTKN5zl7r5pSCsF8rl4R+mvcxyyY
+dS+cqseGjn98ubdd/vyQWqLbQtr5Njk4ROs5Rv6/2z/RUFdwsqB5aXztxOs3J7aJ
+5ScTgmoK+wkQY+tej6H5pgT02vKuXLwe4wHKKAYepgH7Azkm7XoFlHhBEUy+uUsI
+PMBm2Meo1JzOc8E5QqLX2YO/MDiZhI+NYOMJF0/huWqM7wIDAQABo1AwTjAdBgNV
+HQ4EFgQU3cPtQEUQYkj4lTM5fbdkd8FSVVIwHwYDVR0jBBgwFoAU3cPtQEUQYkj4
+lTM5fbdkd8FSVVIwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEACWQX
+CJK6f+/Zci1e7Gvw0gd+1xV8jbz2lH2zOGSpoQ6UCRVrANEX+p0nZZRpnTTrJfGa
+Yx+TjIJ4SSoaCud+N2IGXWxzuMan+NskyycTaHSA/i6OwZ8jKki/iVvE5XQN+gHw
+h5lWnEh5PiGG91lYi+FShUN0DX7Su776UK3mloLlq+ANCubWxDycWc0xBNH8iD2U
+xBV7MfuCX9rSei+2NU+hnOPrzh0OKNGOTSPHY1N1KU3grxTLTpF8PTMHC55sEy2E
+EMokRb3V/lFyrfX755KT5cQG6uQKQorI95BoxoNjeek54tuBUjenLfOH6dMO/6Xd
+WaouLXiDM73fwFI0ByQ5CTJizC0ehoZtv2XB7yjZJEuI7zz4wXBBwgNlBV2j4aJJ
+0wNG9210ZC7NxNH7MRfZfzLQpsOMTm9UP85TXsB1ieaN6OD2CnHP9O6umy633Aie
+dsQt5yaC9+N86NBceb44IlkqlTv720BQjq7Dz5UCthhNg0VYpICzymD9kAKPfx9Z
+1ug2gB4i2r6eHqFIexwSfa29DxW+KEPlL7pP9P9FVsYNyZYOvX8OgVERkA/9L53i
+MpzRPPzTyjW/sJcDWVfrQU0NhPj5lOxdMcbvvU+KD3afraUXPiN4N4+FFWENOFZ/
+HEKjPj+As/3OHNyUXrCciYjq6gCLZ6SV945l2h8=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/key.pem b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/key.pem
new file mode 100644
index 00000000..3b4da6fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/files/key.pem
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDOFNh5eQ6+9tYv
+tzjrqvFDzPoXmZuOFeqFS7iBH4gLorvmQQIQcVEAH7O+tkhWZ8+6PgvXXd43GFtE
+bY8jZoBQwupl7lIdzywFRoyZYkREXodmDixkwxlPvUWdrb3rZDRON6qIbX8LrzTP
+D1+JL4Rtkgr1RTlLrHT3ABEqEV1fQODOdbRd7rq6fmqwPlblzS5kN3RPFuJVDZrn
+CPcEMOA3QftQgDTzyOlZJYWDZsJxel7H/O9qZjPBTitNJxg1ierPaIXT6u6CdWA0
+A7t3Knyn2+vcyvemjsbQg9v/U5zKR3h+6F0slqgOT/ZnrEosAzxdeaA5POJFy6xC
+HZiVgsE7OVaPB9imWrrAYbKsHVLP2rdlhnGZQnnebmTYCll5SvXWCIr5vp4i1qxI
+a95QBU/xmEY6kTy9GjAOSmYXj7UnwnBZwgEop0yUdBMb4s9Gx8S6Yxaj1DZVyiyr
+zInBri9lqabkPLPQNaK7wTKN5zl7r5pSCsF8rl4R+mvcxyyYdS+cqseGjn98ubdd
+/vyQWqLbQtr5Njk4ROs5Rv6/2z/RUFdwsqB5aXztxOs3J7aJ5ScTgmoK+wkQY+te
+j6H5pgT02vKuXLwe4wHKKAYepgH7Azkm7XoFlHhBEUy+uUsIPMBm2Meo1JzOc8E5
+QqLX2YO/MDiZhI+NYOMJF0/huWqM7wIDAQABAoICAB3iqAH1rE3FPgptZ7PFdnd+
+okYJ4KUaSIhMEtWm3PPTBay+gK4hwL1j240sohDlvRolJVJ2KmOTBKlHuhpIIxOT
+MKrXhNEN2jRpproXpg7EJp6GL6ntIR6PNClJqOEaBvvQ1soyFtp67g2ZDSG34lyB
+cVVgVI7E07F+MP8IxaGqpu9J4n48wJeK/a3RXIi22KNv504Q44GyF2SpyCizbdCV
+oPxrm0I/QJfM+S+1Fz2doWEfLRkg+SBvVZg6sygQeBzb64xv5WbF3s2sPONrJeix
+2+KJDKD605ophR3h4jrzYmYFDH4K2xQ4RGOEeL0pOvfTS4kBa07z2mc8I4SLEbpi
+VzQblmftRvwye2eKk74GVhJho7Il6ssTL29TJxIyzEljVFrprILkmAVEV8SOn544
+pgSj6i7gDcav4OdhxldT6dk7PNqMq49p3acYzLtXCknlLkHOODEFH3BWP1oAWN6e
+m34kwPGFviKEIYkurWV0LGV9h/zLL3kxjdbgFyLY24jVbvWuJ9VeJkcHVgL3Rs1A
+5irHFpW9TSKYg+R8zLM50S5HRcnL0wV+hl02TcJbkjyVToFh5FeDdyIxN+sQnh+a
+b+g/IA+um2RbqjEUoaVxCdIo7/oPzzj0u8Pw5FvAedNM1a8sZiUJ/1CW7In8yRPC
+Nb5rONsL/eEHAJU9EWIBAoIBAQDnzEl7NUZ50wjqBTna69u9luxb6ixJM3o9asnY
+BXU90Bp7kl14WbHUmTPYGKoGIEKDmmKpA/LhGwD1vykjtHWkUcgGYYucXIBuzRb7
+hEcAa9qkce6Ik4HS8khRmmikNDu/t5zJU2tkgNuGVQlHvsjpJz/rdX857G5Cv8v7
+GSBL4aNxhp9OoMav3/XEUpRSoccR7WEAdfeTfiR4EgaIy864w4twxr2nLroB6SzN
+dYSPZ4hMkTS34ixzjO233QioAGiEnG22XyBA7DTB41EoRFIBcbPrCMqDONkNHbeO
+j25g4okNjK+7ihmIHZBP0awN+mlfNHnDXuJ6L2LCrxWHQQtHAoIBAQDjmS6h51/H
+gcBDUxot16M/7PPJZUbJ8y+qqPbaqu3ORADyvguE/Ww80we+8xeaOa5tVpVE6diZ
+tg5BfBGwltyCEwKDAG1g9/D3IVw3yE1AuxyG0+1q0/rTcdZl01PgGVwi+28YCLyO
+VxgyIvpGFwgQ9WV22l16JatyhsZLRIWFk78ECJO3v5X8KuCJLnKfcd9nkem9DXdS
+iKqiylOXzvIKGUe5HxeDd/itI8V8b8OTQQxM0jEwCOZQg1o1BNN0uEJo4dENkuYa
+dZyJFYe0ZsM5ZRm5HmcIYMlPejcYaINRX30TZHRNE/X+fCfrIwg0LmJxFVieFcc3
+Dc3ZU1K5T3UZAoIBAQDCAK3ji+RPY/xK+VLB0rOYSy/JggXMwmPl8XG79l14/aqc
+kBTg/JkkqZDFpWbwN6HTyawXb3GhS9uS0oqZEpl/jN8P0CZsGNN5aPd6TOysApj9
+F0ogTuzSY+t5OPWpsPns7L0xlzsD5AFXveZFgP1gfnQ2GqIAFcz22tXbc90fBVup
+UZYV1eRVIOnuhXsUachWTcno+9LZRDM7t0J2zbVX2HnlSsFCoo4VuVXBJEFtUKa4
+BrQLzyWLFIGFaF6tVaIkk1QT1iwFimxhdmLfg8bq5IJEDZiJGVQ4cQ3HKG6mchNp
+Hr2aBex2it/UnlUVYec9QotCpDCDORO8g5NOH3dTAoIBAQCJH9htqVYWgIESzvW9
+2ua7dAqpM0CEGeWFH8mik0s1c9lSZtfP51OYDdjj3dhBjjEeJQVIANAERCCwqYyK
+5UmzgLqJrO85dgiTDGFgJmkM7+7O+M5ZB6BeVn7C2KD3KrBNT0lupIzeAXFNe69o
+HSY5+W+JPSUGm72BAdxkqsL7aLm0N2qwUViPFlIztG1QzS25W7sEsSFL85VDAT1+
+ACvpk7OXwDjNd7G2tw+b2kZt6Mn9WsJR23rP1WO6/85ay00PncXzNKc4F9YY7YTW
+VveWE+h8lOmkrZN8M/kP1qAPncVgsqwzaCxUh/8Q9wlRTwT2dtLuY9ajv8hfAImd
+pla5AoIBAQCduiA3aZBDqGrUTH9bcMRtyU8RTlc2Y5+jFSBP4KfcS/ORoILVdvZz
+v+o0jw01keGAtW77Mq70ZCpc8HjOm8Ju8GYtwUGmPRS+hQaZwT8/QseF6q2Q+Bi5
+Wc0Lqa4YA0cI7XViJRhHIPfdV8YEEAW8rIAUqFSoAT6G7z/o0K4zlSa+2RbG0l1v
+zLWmJtF8OJfM8IboIyER0PHrWjNFzxKCJssu2WE7WT6/Rupus04XVXRR+Fb6DAGb
+yw2MpB3kLvjugQpolx4YbXE4n+F1mkqm9fHjo4fbfSwjmeFnPsRvRmiRTomHxq/s
+DUZ6eZM8TIlGhUrx/Y1TP0GQjKxDN6ZQ
+-----END PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml
new file mode 100644
index 00000000..e18c3d44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/main.yml
@@ -0,0 +1,252 @@
+- name: elb_network_lb tests
+ collections:
+ - amazon.aws
+
+ block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
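+ # The &aws_connection_info anchor above lets every task below supply the
+ # same credentials through the YAML merge key "<<: *aws_connection_info"
+ # instead of repeating the four connection parameters each time.
+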
+ - name: create certificate
+ iam_cert:
+ name: test_cert
+ state: present
+ cert: "{{ lookup('file', 'cert.pem') }}"
+ key: "{{ lookup('file', 'key.pem') }}"
+ <<: *aws_connection_info
+ register: cert
+
+ - name: create VPC
+ ec2_vpc_net:
+ cidr_block: 10.228.228.0/22
+ name: "{{ resource_prefix }}_vpc"
+ state: present
+ <<: *aws_connection_info
+ register: vpc
+
+ - name: create internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ tags:
+ Name: "{{ resource_prefix }}"
+ <<: *aws_connection_info
+ register: igw
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ aws_region}}{{ item.az }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ tags:
+ Created_By: "{{ resource_prefix }}"
+ Public: "{{ item.public }}"
+ <<: *aws_connection_info
+ with_items:
+ - cidr: 10.228.228.0/24
+ az: "a"
+ public: True
+ - cidr: 10.228.229.0/24
+ az: "b"
+ public: True
+ - cidr: 10.228.230.0/24
+ az: "a"
+ public: False
+ - cidr: 10.228.231.0/24
+ az: "b"
+ public: False
+ register: subnets
+
+ - ec2_vpc_subnet_info:
+ filters:
+ vpc-id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+ register: vpc_subnets
+
+ - name: create list of subnet ids
+ set_fact:
+ nlb_subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public == `True`].id') }}"
+ private_subnets: "{{ vpc_subnets|community.general.json_query('subnets[?tags.Public != `True`].id') }}"
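+ # JMESPath sketch (hypothetical ids): 'subnets[?tags.Public == `True`].id'
+ # keeps only the subnets tagged Public=True and projects their ids, e.g.
+ # ["subnet-aaa111", "subnet-bbb222"]; the != form collects the rest.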
+
+ - name: create a route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+ tags:
+ Name: igw-route
+ Created: "{{ resource_prefix }}"
+ subnets: "{{ nlb_subnets + private_subnets }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: route_table
+
+ - ec2_group:
+ name: "{{ resource_prefix }}"
+ description: "security group for Ansible NLB integration tests"
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ ports: 80
+ cidr_ip: 10.228.228.0/22
+ <<: *aws_connection_info
+ register: sec_group
+
+ - name: create a target group for testing
+ elb_target_group:
+ name: "{{ tg_name }}"
+ protocol: tcp
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ <<: *aws_connection_info
+ register: tg
+
+ - name: create a target group for testing tcp_udp protocols
+ elb_target_group:
+ name: "{{ tg_tcpudp_name }}"
+ protocol: tcp_udp
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ <<: *aws_connection_info
+ register: tg_tcpudp
+
+ - include_tasks: test_nlb_bad_listener_options.yml
+ - include_tasks: test_nlb_tags.yml
+ - include_tasks: test_creating_nlb.yml
+ - include_tasks: test_nlb_with_asg.yml
+ - include_tasks: test_modifying_nlb_listeners.yml
+ - include_tasks: test_deleting_nlb.yml
+
+ always:
+
+ - name: destroy NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: destroy target group if it was created
+ elb_target_group:
+ name: "{{ tg_name }}"
+ protocol: tcp
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ <<: *aws_connection_info
+ register: remove_tg
+ retries: 5
+ delay: 3
+ until: remove_tg is success
+ when: tg is defined
+ ignore_errors: yes
+
+ - name: destroy tcp_udp target group if it was created
+ elb_target_group:
+ name: "{{ tg_tcpudp_name }}"
+ protocol: tcp_udp
+ port: 80
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ <<: *aws_connection_info
+ register: remove_tg
+ retries: 5
+ delay: 3
+ until: remove_tg is success
+ when: tg_tcpudp is defined
+ ignore_errors: yes
+
+ - name: destroy sec group
+ ec2_group:
+ name: "{{ sec_group.group_name }}"
+ description: "security group for Ansible NLB integration tests"
+ state: absent
+ vpc_id: "{{ vpc.vpc.id }}"
+ <<: *aws_connection_info
+ register: remove_sg
+ retries: 10
+ delay: 5
+ until: remove_sg is success
+ ignore_errors: yes
+
+ - name: remove route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ route_table_id: "{{ route_table.route_table.route_table_id }}"
+ lookup: id
+ state: absent
+ <<: *aws_connection_info
+ register: remove_rt
+ retries: 10
+ delay: 5
+ until: remove_rt is success
+ ignore_errors: yes
+
+ - name: destroy subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ <<: *aws_connection_info
+ register: remove_subnet
+ retries: 10
+ delay: 5
+ until: remove_subnet is success
+ with_items:
+ - cidr: 10.228.228.0/24
+ - cidr: 10.228.229.0/24
+ - cidr: 10.228.230.0/24
+ - cidr: 10.228.231.0/24
+ ignore_errors: yes
+
+ - name: destroy internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}"
+ state: absent
+ <<: *aws_connection_info
+ register: remove_igw
+ retries: 10
+ delay: 5
+ until: remove_igw is success
+ ignore_errors: yes
+
+ - name: destroy VPC
+ ec2_vpc_net:
+ cidr_block: 10.228.228.0/22
+ name: "{{ resource_prefix }}_vpc"
+ state: absent
+ <<: *aws_connection_info
+ register: remove_vpc
+ retries: 10
+ delay: 5
+ until: remove_vpc is success
+ ignore_errors: yes
+
+ - name: destroy certificate
+ iam_cert:
+ name: test_cert
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml
new file mode 100644
index 00000000..b99af17b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml
@@ -0,0 +1,82 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create NLB with listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Protocol: TLS
+ Port: 443
+ Certificates:
+ - CertificateArn: "{{ cert.arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Protocol: UDP
+ Port: 13
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_tcpudp_name }}"
+ - Protocol: TCP_UDP
+ Port: 17
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_tcpudp_name }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - nlb.listeners|length == 4
+
+ - name: test idempotence creating NLB with listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Protocol: TLS
+ Port: 443
+ Certificates:
+ - CertificateArn: "{{ cert.arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Protocol: UDP
+ Port: 13
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_tcpudp_name }}"
+ - Protocol: TCP_UDP
+ Port: 17
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_tcpudp_name }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
+ - nlb.listeners|length == 4
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml
new file mode 100644
index 00000000..23d1d535
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml
@@ -0,0 +1,50 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: destroy NLB with listener
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: absent
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ wait: yes
+ wait_timeout: 300
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+
+ - name: test idempotence
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: absent
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ wait: yes
+ wait_timeout: 300
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
new file mode 100644
index 00000000..67ab99e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
@@ -0,0 +1,88 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: add a listener
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ - Protocol: TCP
+ Port: 443
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - nlb.listeners|length == 2
+
+ - name: test an omitted listener will not be removed without purge_listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ purge_listeners: false
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
+ - nlb.listeners|length == 2
+
+ - name: remove a listener with purge_listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ purge_listeners: true
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - nlb.listeners|length == 1
+
+ - name: remove listener from NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners: []
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - not nlb.listeners
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml
new file mode 100644
index 00000000..5372cae3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml
@@ -0,0 +1,72 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: test creating an NLB with invalid listener options
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ #security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ Certificates: {'CertificateArn': 'test', 'IsDefault': 'True'}
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: nlb
+
+ - assert:
+ that:
+ - nlb is failed
+ - "'unable to convert to list' in nlb.msg"
+
+ - name: test creating an NLB without providing required listener options
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ #security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Port: 80
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: nlb
+
+ - assert:
+ that:
+ - nlb is failed
+ - '"missing required arguments" in nlb.msg'
+ - '"Protocol" in nlb.msg'
+ - '"DefaultActions" in nlb.msg'
+
+ - name: test creating an NLB providing an invalid listener option type
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ #security_groups: "{{ sec_group.group_id }}"
+ state: present
+ listeners:
+ - Protocol: TCP
+ Port: "bad type"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ register: nlb
+
+ - assert:
+ that:
+ - nlb is failed
+ - "'unable to convert to int' in nlb.msg"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
new file mode 100644
index 00000000..6b81e90c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
@@ -0,0 +1,101 @@
+- block:
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create NLB with no listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+
+ - name: re-create NLB with no listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
+
+ - name: add tags to NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ tags:
+ created_by: "NLB test {{ resource_prefix }}"
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
+
+ - name: test tags are not removed if unspecified
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
+ - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
+
+ - name: remove tags from NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ tags: {}
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
+ - not nlb.tags
+
+ - name: test idempotence
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ tags: {}
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - not nlb.changed
+ - not nlb.tags
+
+ - name: destroy NLB with no listeners
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ state: absent
+ <<: *aws_connection_info
+ register: nlb
+
+ - assert:
+ that:
+ - nlb.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml
new file mode 100644
index 00000000..f5005df6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml
@@ -0,0 +1,90 @@
+- block:
+
+ # create instances
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - ec2_ami_info:
+ <<: *aws_connection_info
+ filters:
+ architecture: x86_64
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: "amzn-ami-hvm*"
+ register: amis
+
+ - set_fact:
+ latest_amazon_linux: "{{ amis.images | sort(attribute='creation_date') | last }}"
+
+ - ec2_asg:
+ <<: *aws_connection_info
+ state: absent
+ name: "{{ resource_prefix }}-webservers"
+ wait_timeout: 900
+
+ - ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ state: absent
+
+ - name: Create launch config for testing
+ ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ assign_public_ip: true
+ image_id: "{{ latest_amazon_linux.image_id }}"
+ security_groups: "{{ sec_group.group_id }}"
+ instance_type: t2.micro
+ user_data: |
+ #!/bin/bash
+ set -x
+ yum update -y --nogpgcheck
+ yum install -y --nogpgcheck httpd
+ echo "Hello Ansiblings!" >> /var/www/html/index.html
+ service httpd start
+ volumes:
+ - device_name: /dev/xvda
+ volume_size: 10
+ volume_type: gp2
+ delete_on_termination: true
+
+ - name: Create autoscaling group for app server fleet
+ ec2_asg:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-webservers"
+ vpc_zone_identifier: "{{ nlb_subnets }}"
+ launch_config_name: "{{ resource_prefix }}-web-lcfg"
+ termination_policies:
+ - OldestLaunchConfiguration
+ - Default
+ health_check_period: 600
+ health_check_type: EC2
+ replace_all_instances: true
+ min_size: 0
+ max_size: 2
+ desired_capacity: 1
+ wait_for_instances: true
+ target_group_arns:
+ - "{{ tg.target_group_arn }}"
+
+ always:
+
+ - ec2_asg:
+ <<: *aws_connection_info
+ state: absent
+ name: "{{ resource_prefix }}-webservers"
+ wait_timeout: 900
+ ignore_errors: yes
+
+ - ec2_lc:
+ <<: *aws_connection_info
+ name: "{{ resource_prefix }}-web-lcfg"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/aliases
new file mode 100644
index 00000000..b9935459
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+elb_target_group
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/full_test.yml
new file mode 100644
index 00000000..ac95c66f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/full_test.yml
@@ -0,0 +1,7 @@
+- hosts: localhost
+ connection: local
+# environment: "{{ ansible_test.environment }}"
+
+ roles:
+ - elb_lambda_target
+ - elb_target
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml
new file mode 100644
index 00000000..a28253eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml
@@ -0,0 +1,5 @@
+resource_shortprefix: 'ansible-test-{{ resource_prefix | regex_search("([0-9]+)$") }}'
+lambda_role_name: '{{ resource_shortprefix }}-elb-target-lambda'
+#lambda_role_name: '{{ resource_prefix }}-elb-target-lambda'
+lambda_name: '{{ resource_prefix }}-elb-target-lambda'
+elb_target_group_name: '{{ resource_shortprefix }}-elb-tg'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py
new file mode 100644
index 00000000..3ea22472
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py
@@ -0,0 +1,10 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+
+
+def lambda_handler(event, context):
+ return {
+ 'statusCode': 200,
+ 'body': json.dumps('Hello from Lambda!')
+ }
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json
new file mode 100644
index 00000000..06456f79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json
@@ -0,0 +1,8 @@
+{
+ "Version": "2012-10-17",
+ "Statement": {
+ "Effect": "Allow",
+ "Principal": { "Service": "lambda.amazonaws.com" },
+ "Action": "sts:AssumeRole"
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml
new file mode 100644
index 00000000..fb310b84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml
@@ -0,0 +1,110 @@
+- name: set up lambda as elb_target
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
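+ # module_defaults injects these group/aws parameters into every AWS module
+ # used in the block below, so individual tasks can omit the credentials.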
+ collections:
+ - community.general
+ block:
+ - name: create zip to deploy lambda code
+ archive:
+ path: '{{ role_path }}/files/ansible_lambda_target.py'
+ dest: /tmp/lambda.zip
+ format: zip
+ - name: create or update service-role for lambda
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", role_path + "/files/assume-role.json") }}'
+ managed_policy:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: ROLE_ARN
+ - name: wait for the new role to become usable (IAM is eventually consistent)
+ pause:
+ seconds: 10
+ - name: deploy lambda.zip to ansible_lambda_target function
+ lambda:
+ name: '{{ lambda_name }}'
+ state: present
+ zip_file: /tmp/lambda.zip
+ runtime: python3.7
+ role: '{{ ROLE_ARN.arn }}'
+ handler: ansible_lambda_target.lambda_handler
+ timeout: 30
+ register: lambda_function
+ retries: 3
+ delay: 15
+ until: lambda_function.changed
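+ # Retrying until the deploy reports "changed" works around IAM eventual
+ # consistency: early attempts may fail while the new role propagates.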
+ - name: create empty target group
+ elb_target_group:
+ name: '{{ elb_target_group_name }}'
+ target_type: lambda
+ state: present
+ modify_targets: false
+ register: elb_target_group
+ - name: tg is created, state must be changed
+ assert:
+ that:
+ - elb_target_group.changed
+ - name: allow elb to invoke the lambda function
+ lambda_policy:
+ state: present
+ function_name: '{{ lambda_name }}'
+ version: '{{ lambda_function.configuration.version }}'
+ statement_id: elb1
+ action: lambda:InvokeFunction
+ principal: elasticloadbalancing.amazonaws.com
+ source_arn: '{{ elb_target_group.target_group_arn }}'
+ - name: add lambda to elb target
+ elb_target_group:
+ name: '{{ elb_target_group_name }}'
+ target_type: lambda
+ state: present
+ targets:
+ - Id: '{{ lambda_function.configuration.function_arn }}'
+ register: elb_target_group
+ - name: target is updated, state must be changed
+ assert:
+ that:
+ - elb_target_group.changed
+ - name: re-add lambda to elb target (idempotency)
+ elb_target_group:
+ name: '{{ elb_target_group_name }}'
+ target_type: lambda
+ state: present
+ targets:
+ - Id: '{{ lambda_function.configuration.function_arn }}'
+ register: elb_target_group
+ - name: target is still the same, state must not be changed (idempotency)
+ assert:
+ that:
+ - not elb_target_group.changed
+ - name: remove lambda target from target group
+ elb_target_group:
+ name: '{{ elb_target_group_name }}'
+ target_type: lambda
+ state: absent
+ targets: []
+ register: elb_target_group
+ - name: target was removed, state must be changed
+ assert:
+ that:
+ - elb_target_group.changed
+ always:
+ - name: remove elb target group
+ elb_target_group:
+ name: '{{ elb_target_group_name }}'
+ target_type: lambda
+ state: absent
+ ignore_errors: true
+ - name: remove lambda function
+ lambda:
+ name: '{{ lambda_name }}'
+ state: absent
+ ignore_errors: true
+ - name: remove iam role for lambda
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml
new file mode 100644
index 00000000..731c84d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
+
+resource_shortprefix: 'ansible-test-{{ resource_prefix | regex_search("([0-9]+)$") }}'
+tg_name: "{{ resource_shortprefix }}-tg"
+tg_tcpudp_name: "{{ resource_shortprefix }}-tgtcpudp"
+lb_name: "{{ resource_shortprefix }}-lb"
+
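+# elb_target reports target_health as a mapping, so a fully registered,
+# passing target is expected to compare equal to {'state': 'healthy'}: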
+healthy_state:
+ state: 'healthy'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml
new file mode 100644
index 00000000..2021823f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml
@@ -0,0 +1,484 @@
+---
+ - name: set up elb_target test prerequisites
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+
+ - debug: msg="********** Setting up elb_target test dependencies **********"
+
+ # ============================================================
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - set_fact:
+ ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
+
+
+ - name: set up testing VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: 20.0.0.0/16
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc
+
+ - name: set up testing internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ register: igw
+
+ - name: set up testing subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.0.0/18
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: subnet_1
+
+ - name: set up testing subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.64.0/18
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: subnet_2
+
+ - name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ register: route_table
+
+ - name: create testing security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: sg
+
+ - name: set up testing target group (type=instance)
+ elb_target_group:
+ name: "{{ tg_name }}"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up testing target group (type=instance) with UDP protocol
+ elb_target_group:
+ name: "{{ tg_tcpudp_name }}"
+ protocol: udp
+ port: 53
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ target_type: instance
+ tags:
+ Protocol: "UDP"
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up testing target group for ALB (type=instance)
+ elb_target_group:
+ name: "{{ tg_name }}-used"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up ec2 instance to use as a target
+ ec2:
+ group_id: "{{ sg.group_id }}"
+ instance_type: t3.micro
+ image: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ subnet_2.subnet.id }}"
+ instance_tags:
+ Name: "{{ resource_prefix }}-inst"
+ exact_count: 1
+ count_tag:
+ Name: "{{ resource_prefix }}-inst"
+ assign_public_ip: true
+ volumes: []
+ wait: true
+ ebs_optimized: false
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ packages:
+ - httpd
+ runcmd:
+ - "service httpd start"
+ - echo "HELLO ANSIBLE" > /var/www/html/index.html
+ register: ec2
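+ # exact_count/count_tag make this task idempotent: ec2 converges the number
+ # of instances carrying the Name tag to exactly one, launching or
+ # terminating instances as needed.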
+
+ - name: create an application load balancer
+ elb_application_lb:
+ name: "{{ lb_name }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}-used"
+ state: present
+
+ # ============================================================
+
+ - debug: msg="********** Running elb_target integration tests **********"
+
+ # ============================================================
+
+ - name: register an instance to unused target group
+ elb_target:
+ target_group_name: "{{ tg_name }}"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: present
+ register: result
+
+ - name: target is registered
+ assert:
+ that:
+ - result.changed
+ - result.target_group_arn
+ - result.target_health_descriptions.target.id == ec2.instance_ids[0]
+
+ # ============================================================
+
+ - name: test idempotence
+ elb_target:
+ target_group_name: "{{ tg_name }}"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: present
+ register: result
+
+ - name: target was already registered
+ assert:
+ that:
+ - not result.changed
+
+ # ============================================================
+
+ - name: remove an unused target
+ elb_target:
+ target_group_name: "{{ tg_name }}"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: absent
+ deregister_unused: true
+ register: result
+
+ - name: target was deregistered
+ assert:
+ that:
+ - result.changed
+ - not result.target_health_descriptions
+
+ # ============================================================
+
+ - name: register an instance to used target group and wait until healthy
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: present
+ target_status: healthy
+ target_status_timeout: 400
+ register: result
+
+ - name: target is registered
+ assert:
+ that:
+ - result.changed
+ - result.target_group_arn
+ - result.target_health_descriptions.target.id == ec2.instance_ids[0]
+ - result.target_health_descriptions.target_health == healthy_state
+
+ # ============================================================
+
+ - name: remove a target from used target group
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: absent
+ target_status: unused
+ target_status_timeout: 400
+ register: result
+
+ - name: target was deregistered
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+
+ - name: test idempotence
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: absent
+ register: result
+
+ - name: target was already deregistered
+ assert:
+ that:
+ - not result.changed
+
+ # ============================================================
+
+ - name: register an instance to used target group and wait until healthy again to test deregistering differently
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: present
+ target_status: healthy
+ target_status_timeout: 400
+ register: result
+
+ - name: target is registered
+ assert:
+ that:
+ - result.changed
+ - result.target_group_arn
+ - result.target_health_descriptions.target.id == ec2.instance_ids[0]
+ - result.target_health_descriptions.target_health == healthy_state
+
+ - name: start deregistration but don't wait
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: absent
+ register: result
+
+ - name: target is starting to deregister
+ assert:
+ that:
+ - result.changed
+ - result.target_health_descriptions.target_health.reason == "Target.DeregistrationInProgress"
+
+ - name: now wait for target to finish deregistering
+ elb_target:
+ target_group_name: "{{ tg_name }}-used"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: absent
+ target_status: unused
+ target_status_timeout: 400
+ register: result
+
+ - name: target was deregistered already and now has finished
+ assert:
+ that:
+ - not result.changed
+ - not result.target_health_descriptions
+
+ # ============================================================
+
+ always:
+
+ - debug: msg="********** Tearing down elb_target test dependencies **********"
+
+ - name: remove ec2 instance
+ ec2:
+ group_id: "{{ sg.group_id }}"
+ instance_type: t3.micro
+ image: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ subnet_2.subnet.id }}"
+ instance_tags:
+ Name: "{{ resource_prefix }}-inst"
+ exact_count: 0
+ count_tag:
+ Name: "{{ resource_prefix }}-inst"
+ assign_public_ip: true
+ volumes: []
+ wait: true
+ ebs_optimized: false
+ ignore_errors: true
+
+ - name: remove testing target groups
+ elb_target_group:
+ name: "{{ item }}"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+ wait: true
+ wait_timeout: 400
+ register: removed
+ retries: 10
+ until: removed is not failed
+ with_items:
+ - "{{ tg_name }}"
+ - "{{ tg_name }}-used"
+ ignore_errors: true
+
+ - name: remove udp testing target groups
+ elb_target_group:
+ name: "{{ item }}"
+ protocol: udp
+ port: 53
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+ Protocol: "UDP"
+ wait: true
+ wait_timeout: 400
+ register: removed
+ retries: 10
+ until: removed is not failed
+ with_items:
+ - "{{ tg_tcpudp_name }}"
+ ignore_errors: true
+
+ - name: remove application load balancer
+ elb_application_lb:
+ name: "{{ lb_name }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}-used"
+ state: absent
+ wait: true
+ wait_timeout: 400
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing security group
+ ec2_group:
+ state: absent
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+ route_table_id: "{{ route_table.route_table.id }}"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing subnet
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.0.0/18
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing subnet
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.64.0/18
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: 20.0.0.0/16
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: removed
+ retries: 10
+ until: removed is not failed
+
+ # ============================================================
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/version_fail.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/version_fail.yml
new file mode 100644
index 00000000..43cdba85
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/playbooks/version_fail.yml
@@ -0,0 +1,41 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+
+ tasks:
+ - name: set up aws connection info
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: set up testing target group (type=ip)
+ elb_target_group:
+ state: present
+ #name: "{{ resource_shortprefix }}-tg"
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: 'vpc-abcd1234'
+ target_type: ip
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+ register: elb_target_group_type_ip
+ ignore_errors: yes
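+ # The vpc id above is a dummy; with the old botocore pinned by runme.sh the
+ # module is expected to fail its version check before any API call is made.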
+
+ - name: check that setting up target group with type=ip fails with friendly message
+ assert:
+ that:
+ - elb_target_group_type_ip is failed
+ - "'msg' in elb_target_group_type_ip"
+
+ # In the off-chance that this went (partially) through when it shouldn't...
+ always:
+ - name: Remove testing target group (type=ip)
+ elb_target_group:
+ state: absent
+ #name: "{{ resource_shortprefix }}-tg"
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/runme.sh
new file mode 100755
index 00000000..e379f24b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Test graceful failure for older versions of botocore
+source virtualenv.sh
+pip install 'botocore<=1.7.1' boto3
+ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
+
+# Run full test suite
+source virtualenv.sh
+pip install 'botocore>=1.8.0' boto3
+ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
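+
+# Note: each "source virtualenv.sh" is assumed to provide a fresh virtualenv,
+# so the botocore pin from the first run does not leak into the second.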
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/full_test.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/full_test.yml
new file mode 100644
index 00000000..20942527
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/full_test.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+
+ roles:
+ - elb_target_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml
new file mode 100644
index 00000000..4420a8d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
+
+tg_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
+lb_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-lb"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml
new file mode 100644
index 00000000..8ca10069
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml
@@ -0,0 +1,507 @@
+---
+ - name: set up elb_target_info test prerequisites
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+
+ block:
+
+ # ============================================================
+
+ - debug: msg="********** Setting up elb_target_info test dependencies **********"
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - set_fact:
+ ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
+
+ # ============================================================
+
+ - name: set up testing VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: 20.0.0.0/16
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc
+
+ - name: set up testing internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: present
+ register: igw
+
+ - name: set up testing subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.0.0/18
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: subnet_1
+
+ - name: set up testing subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.64.0/18
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: subnet_2
+
+ - name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ register: route_table
+
+ - name: create testing security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: sg
+
+ - name: set up testing target group (type=instance)
+ register: alb_target_group
+ elb_target_group:
+ name: "{{ tg_name }}-inst"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ target_type: instance
+ # set this to 30 to test polling for changes, instead of having everything go out immediately
+ deregistration_delay_timeout: 30
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up testing target group (type=ip)
+ register: nlb_target_group
+ elb_target_group:
+ name: "{{ tg_name }}-ip"
+ health_check_port: 80
+ protocol: tcp
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ # set this to 30 to test polling for changes, instead of having everything go out immediately
+ deregistration_delay_timeout: 30
+ target_type: ip
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up testing target group which will not be associated with any load balancers
+ register: idle_target_group
+ elb_target_group:
+ name: "{{ tg_name }}-idle"
+ health_check_port: 80
+ protocol: tcp
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+
+ - name: set up ec2 instance to use as a target
+ ec2:
+ group_id: "{{ sg.group_id }}"
+ instance_type: t2.micro
+ image: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ subnet_2.subnet.id }}"
+ instance_tags:
+ Name: "{{ resource_prefix }}-inst"
+ exact_count: 1
+ count_tag:
+ Name: "{{ resource_prefix }}-inst"
+ assign_public_ip: true
+ volumes: []
+ wait: true
+ ebs_optimized: false
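+ # the cloud-config below installs and starts httpd so the instance can pass
+ # HTTP health checks on port 80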
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ packages:
+ - httpd
+ runcmd:
+ - "service httpd start"
+ - echo "HELLO ANSIBLE" > /var/www/html/index.html
+ register: ec2
+
+ - name: create an application load balancer
+ elb_application_lb:
+ name: "{{ lb_name }}-alb"
+ security_groups:
+ - "{{ sg.group_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}-inst"
+ state: present
+
+
+ - name: create a network load balancer
+ elb_network_lb:
+ name: "{{ lb_name }}-nlb"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ listeners:
+ - Protocol: TCP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}-ip"
+ state: present
+
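+ # elb_target's target_status waits for the target to reach the given state;
+ # "initial" returns as soon as registration has begun, before health checks pass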
+ - name: register with the ALB
+ elb_target:
+ target_group_name: "{{ tg_name }}-inst"
+ target_id: "{{ ec2.instance_ids[0] }}"
+ state: present
+ target_status: "initial"
+
+ - name: register with the NLB IP target group
+ elb_target:
+ target_group_name: "{{ tg_name }}-ip"
+ target_id: "{{ ec2.instances[0].private_ip }}"
+ state: present
+ target_status: "initial"
+
+ # ============================================================
+
+ - debug: msg="********** Running elb_target_info integration tests **********"
+
+ # ============================================================
+ - name: gather facts
+ elb_target_info:
+ instance_id: "{{ ec2.instance_ids[0]}}"
+ register: target_facts
+
+ - assert:
+ that:
+ - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - (target_facts.instance_target_groups | length) == 2
+ msg: "target facts showed the target in the right target groups"
+
+
+ - name: register with unused target group
+ elb_target:
+ target_group_name: "{{ tg_name }}-idle"
+ target_id: "{{ ec2.instance_ids[0]}}"
+ state: present
+ target_status: "unused"
+
+ - name: gather facts again, including the idle group
+ elb_target_info:
+ instance_id: "{{ ec2.instance_ids[0]}}"
+ register: target_facts
+
+ - assert:
+ that:
+ - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - (target_facts.instance_target_groups | length) == 3
+ msg: "target facts reflected the addition of the target to the idle group"
+
+ - name: gather facts again, this time excluding the idle group
+ elb_target_info:
+ instance_id: "{{ ec2.instance_ids[0]}}"
+ get_unused_target_groups: false
+ register: target_facts
+
+ - assert:
+ that:
+ - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - (target_facts.instance_target_groups | length) == 2
+ msg: "target_facts.instance_target_groups did not gather unused target groups when variable was set"
+
+ - name: register twice in the same target group
+ elb_target:
+ target_group_name: "{{ tg_name }}-ip"
+ target_port: 22
+ target_id: "{{ ec2.instances[0].private_ip }}"
+ state: present
+ target_status: "healthy"
+ target_status_timeout: 400
+
+ - name: gather facts
+ elb_target_info:
+ instance_id: "{{ ec2.instance_ids[0] }}"
+ get_unused_target_groups: false
+ register: target_facts
+
+ - assert:
+ that:
+ - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
+ - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
+ - (target_facts.instance_target_groups | length) == 2
+ - (target_facts.instance_target_groups |
+ selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) |
+ map(attribute='targets') |
+ flatten |
+ list |
+ length) == 2
+ msg: "registering a target twice didn't affect the overall target group count, increased target count"
+
+ - set_fact:
+ original_target_groups: "{{ target_facts.instance_target_groups }}"
+
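+ # with_subelements pairs each target group (item.0) with each of its
+ # registered targets (item.1), so every target is removed from every group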
+ - name: Deregister instance from all target groups
+ elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ with_subelements:
+ - "{{ original_target_groups }}"
+ - "targets"
+
+ - name: wait for all targets to finish deregistering
+ elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ec2.instance_ids[0] }}"
+ register: target_facts
+ until: (target_facts.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+ - name: reregister in elbv2s
+ elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_target_groups }}"
+ - "targets"
+
+ # wait until all groups associated with this instance are 'healthy' or
+ # 'unused'
+ - name: wait for registration
+ elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ec2.instance_ids[0] }}"
+ register: target_facts
+ until: >
+ (target_facts.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
+
+ - assert:
+ that:
+ - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
+ - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
+ - (target_facts.instance_target_groups | length) == 2
+ - (target_facts.instance_target_groups |
+ selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) |
+ map(attribute='targets') |
+ flatten |
+ list |
+ length) == 2
+ msg: "reregistration completed successfully"
+
+ always:
+
+ - name: announce teardown of test dependencies
+ debug: msg="********** Tearing down elb_target_info test dependencies **********"
+
+ - name: remove ec2 instance
+ ec2:
+ group_id: "{{ sg.group_id }}"
+ instance_type: t2.micro
+ image: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ subnet_2.subnet.id }}"
+ instance_tags:
+ Name: "{{ resource_prefix }}-inst"
+ exact_count: 0
+ count_tag:
+ Name: "{{ resource_prefix }}-inst"
+ assign_public_ip: true
+ volumes: []
+ wait: true
+ ebs_optimized: false
+ ignore_errors: true
+
+ - name: remove application load balancer
+ elb_application_lb:
+ name: "{{ lb_name }}-alb"
+ security_groups:
+ - "{{ sg.group_id }}"
+ subnets:
+ - "{{ subnet_1.subnet.id }}"
+ - "{{ subnet_2.subnet.id }}"
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}-inst"
+ state: absent
+ wait: true
+ wait_timeout: 200
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove NLB
+ ignore_errors: true
+ elb_network_lb:
+ name: "{{ lb_name }}-nlb"
+ state: absent
+
+ - name: remove testing target groups
+ elb_target_group:
+ name: "{{ item }}"
+ health_check_port: 80
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ target_type: instance
+ tags:
+ Description: "Created by {{ resource_prefix }}"
+ wait: true
+ wait_timeout: 200
+ register: removed
+ retries: 10
+ until: removed is not failed
+ with_items:
+ - "{{ tg_name }}-idle"
+ - "{{ tg_name }}-ip"
+ - "{{ tg_name }}-inst"
+ ignore_errors: true
+
+ - name: remove testing security group
+ ec2_group:
+ state: absent
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+ route_table_id: "{{ route_table.route_table.id }}"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing subnet in the first availability zone
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.0.0/18
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing subnet in the second availability zone
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: 20.0.64.0/18
+ az: "{{ aws_region }}b"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet"
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc.vpc.id }}"
+ state: absent
+ register: removed
+ retries: 10
+ until: removed is not failed
+ ignore_errors: true
+
+ - name: remove testing VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: 20.0.0.0/16
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: removed
+ retries: 10
+ until: removed is not failed
+
+ # ============================================================
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/runme.sh
new file mode 100755
index 00000000..33d2b8d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/elb_target_info/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases
new file mode 100644
index 00000000..67ae2cc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/aliases
@@ -0,0 +1,2 @@
+unsupported
+cloud/aws
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml
new file mode 100644
index 00000000..f5112b1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+test_user: '{{ resource_prefix }}-user'
+test_group: '{{ resource_prefix }}-group'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml
new file mode 100644
index 00000000..65b44182
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_group/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+- name: set up aws connection info
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+ block:
+ - name: ensure ansible user exists
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+
+ - name: ensure group exists
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group.iam_group.users
+ - iam_group is changed
+
+ - name: add non-existent user to group
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ - NonExistentUser
+ state: present
+ ignore_errors: yes
+ register: iam_group
+
+ - name: assert that adding a non-existent user to the group fails with a helpful message
+ assert:
+ that:
+ - iam_group is failed
+ - iam_group.msg.startswith("Couldn't add user NonExistentUser to group " + test_group)
+
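+ # purge_users removes any members not named in users:, so an empty list
+ # empties the group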
+ - name: remove a user
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: True
+ users: []
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is changed
+ - not iam_group.iam_group.users
+
+ - name: re-remove a user (no change)
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: True
+ users: []
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is not changed
+ - not iam_group.iam_group.users
+
+ - name: Add the user again
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is changed
+ - iam_group.iam_group.users
+
+ - name: Re-add the user
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is not changed
+ - iam_group.iam_group.users
+
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is changed
+
+ - name: re-remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group is not changed
+
+ always:
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+
+ - name: remove ansible user
+ iam_user:
+ name: '{{ test_user }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
new file mode 100644
index 00000000..7b773eac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
@@ -0,0 +1,107 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+ block:
+ - name: set iam password policy
+ iam_password_policy:
+ state: present
+ min_pw_length: 8
+ require_symbols: false
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ pw_max_age: 60
+ pw_reuse_prevent: 5
+ pw_expire: false
+ register: result
+
+ - name: assert that changes were made
+ assert:
+ that:
+ - result.changed
+
+ - name: verify iam password policy has been created
+ iam_password_policy:
+ state: present
+ min_pw_length: 8
+ require_symbols: false
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ pw_max_age: 60
+ pw_reuse_prevent: 5
+ pw_expire: false
+ register: result
+
+ - name: assert that no changes were made
+ assert:
+ that:
+ - not result.changed
+
+ - name: update iam password policy with different settings
+ iam_password_policy:
+ state: present
+ min_pw_length: 15
+ require_symbols: true
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ pw_max_age: 30
+ pw_reuse_prevent: 10
+ pw_expire: true
+ register: result
+
+ - name: assert that updates were made
+ assert:
+ that:
+ - result.changed
+
+ # Test for regression of #59102
+ - name: update iam password policy without expiry
+ iam_password_policy:
+ state: present
+ min_pw_length: 15
+ require_symbols: true
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ register: result
+
+ - name: assert that changes were made
+ assert:
+ that:
+ - result.changed
+
+ - name: remove iam password policy
+ iam_password_policy:
+ state: absent
+ register: result
+
+ - name: assert password policy has been removed
+ assert:
+ that:
+ - result.changed
+
+ - name: verify password policy has been removed
+ iam_password_policy:
+ state: absent
+ register: result
+
+ - name: assert no changes were made
+ assert:
+ that:
+ - not result.changed
+ always:
+ - name: remove iam password policy
+ iam_password_policy:
+ state: absent
+ register: result
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/aliases
new file mode 100644
index 00000000..3f812e19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/aliases
@@ -0,0 +1,3 @@
+iam_policy_info
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/defaults/main.yml
new file mode 100644
index 00000000..93759404
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+iam_name: '{{resource_prefix}}'
+iam_policy_name_a: '{{resource_prefix}}-document-a'
+iam_policy_name_b: '{{resource_prefix}}-document-b'
+iam_policy_name_c: '{{resource_prefix}}-json-a'
+iam_policy_name_d: '{{resource_prefix}}-json-b'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access.json
new file mode 100644
index 00000000..a2f29975
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json
new file mode 100644
index 00000000..9d40dd54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json
@@ -0,0 +1,11 @@
+{
+ "Id": "MyId",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json
new file mode 100644
index 00000000..0efbc31d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json
@@ -0,0 +1,11 @@
+{
+ "Id": "MyOtherId",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_trust.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_trust.json
new file mode 100644
index 00000000..c3661618
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/files/no_trust.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Principal": {"AWS": "*"},
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/main.yml
new file mode 100644
index 00000000..2780bd3e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/main.yml
@@ -0,0 +1,99 @@
+---
+- name: 'Run integration tests for IAM (inline) Policy management'
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+ block:
+ # ============================================================
+ - name: Create a temporary folder for the policies
+ tempfile:
+ state: directory
+ register: tmpdir
+ - name: Copy over policy
+ copy:
+ src: no_access.json
+ dest: "{{ tmpdir.path }}"
+ - name: Copy over policy with an Id
+ copy:
+ src: no_access_with_id.json
+ dest: "{{ tmpdir.path }}"
+ - name: Copy over policy with a second Id
+ copy:
+ src: no_access_with_second_id.json
+ dest: "{{ tmpdir.path }}"
+
+ # ============================================================
+ - name: Create user for tests
+ iam_user:
+ state: present
+ name: "{{ iam_name }}"
+ register: result
+ - name: Ensure user was created
+ assert:
+ that:
+ - result is changed
+
+ - name: Create role for tests
+ iam_role:
+ state: present
+ name: "{{ iam_name }}"
+ assume_role_policy_document: "{{ lookup('file','no_trust.json') }}"
+ register: result
+ - name: Ensure role was created
+ assert:
+ that:
+ - result is changed
+
+ - name: Create group for tests
+ iam_group:
+ state: present
+ name: "{{ iam_name }}"
+ register: result
+ - name: Ensure group was created
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+
+ - name: Run tests for each type of object
+ include_tasks: object.yml
+ loop_control:
+ loop_var: iam_type
+ with_items:
+ - user
+ - group
+ - role
+
+ # ============================================================
+
+ always:
+ # ============================================================
+ - name: Remove user
+ iam_user:
+ state: absent
+ name: "{{ iam_name }}"
+ ignore_errors: yes
+
+ - name: Remove role
+ iam_role:
+ state: absent
+ name: "{{ iam_name }}"
+ ignore_errors: yes
+
+ - name: Remove group
+ iam_group:
+ state: absent
+ name: "{{ iam_name }}"
+ ignore_errors: yes
+
+ # ============================================================
+ - name: Delete temporary folder containing the policies
+ file:
+ state: absent
+ path: "{{ tmpdir.path }}/"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/object.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/object.yml
new file mode 100644
index 00000000..2007da11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_policy/tasks/object.yml
@@ -0,0 +1,1065 @@
+---
+- name: 'Run integration tests for IAM (inline) Policy management on {{ iam_type }}s'
+ vars:
+ iam_object_key: '{{ iam_type }}_name'
+ block:
+ # ============================================================
+ - name: 'Fetch policies from {{ iam_type }} before making changes'
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+
+ - name: 'Assert empty policy list'
+ assert:
+ that:
+ - iam_policy_info is succeeded
+ - iam_policy_info.policies | length == 0
+ - iam_policy_info.all_policy_names | length == 0
+ - iam_policy_info.policy_names | length == 0
+
+ - name: 'Fetch policies from non-existent {{ iam_type }}'
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}-junk'
+ register: iam_policy_info
+
+ - name: 'Assert not failed'
+ assert:
+ that:
+ - iam_policy_info is succeeded
+
+ # ============================================================
+ - name: 'Create policy using document for {{ iam_type }} (check mode)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: yes
+ register: result
+
+ - name: 'Assert policy would be added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+
+ - name: 'Create policy using document for {{ iam_type }}'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.policy_names
+ - iam_policy_info.policy_names | length == 1
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Create policy using document for {{ iam_type }} (idempotency)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ # ============================================================
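+ # skip_duplicates=yes: the module will not attach a policy whose document
+ # matches one already attached under another name, so these tasks should
+ # report no change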
+ - name: 'Create policy using document for {{ iam_type }} (check mode) (skip_duplicates)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would not be added for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.all_policy_names | length == 1
+ - '"policies" not in iam_policy_info'
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: 'Create policy using document for {{ iam_type }} (skip_duplicates)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was not added for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_b not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - '"policy_names" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: 'Create policy using document for {{ iam_type }} (check mode) (skip_duplicates = no)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: 'Create policy using document for {{ iam_type }} (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_b
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_b
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ # ============================================================
+ - name: 'Create policy using json for {{ iam_type }} (check mode)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_c not in iam_policy_info.all_policy_names
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+
+ - name: 'Create policy using json for {{ iam_type }}'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_c
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Create policy using json for {{ iam_type }} (idempotency)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_c
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ # ============================================================
+ - name: 'Create policy using json for {{ iam_type }} (check mode) (skip_duplicates)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would not be added for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d not in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - '"policies" not in iam_policy_info'
+
+ - name: 'Create policy using json for {{ iam_type }} (skip_duplicates)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was not added for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_d not in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d not in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - '"policies" not in iam_policy_info'
+
+ - name: 'Create policy using json for {{ iam_type }} (check mode) (skip_duplicates = no)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+
+ - name: 'Create policy using json for {{ iam_type }} (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was added for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 4
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_name_a not in iam_policy_info.policy_names
+ - iam_policy_name_b not in iam_policy_info.policy_names
+ - iam_policy_name_c not in iam_policy_info.policy_names
+ - iam_policy_name_d in iam_policy_info.policy_names
+ - iam_policy_info.policy_names | length == 1
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_d
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_d
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ # ============================================================
+ - name: 'Test fetching multiple policies from {{ iam_type }}'
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+
+ - name: 'Assert all policies returned'
+ assert:
+ that:
+ - iam_policy_info is succeeded
+ - iam_policy_info.policies | length == 4
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ # Quick test that the policies are the ones we expect
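+ # (json_query evaluates a JMESPath expression; it needs the jmespath
+ # Python library on the controller)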
+ - iam_policy_info.policies | community.general.json_query('[*].policy_name') | length == 4
+ - iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | length == 2
+ - iam_policy_name_c in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list)
+ - iam_policy_name_d in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list)
+
+ # ============================================================
+ - name: 'Update policy using document for {{ iam_type }} (check mode) (skip_duplicates)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would not be updated for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Update policy using document for {{ iam_type }} (skip_duplicates)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was not updated for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Update policy using document for {{ iam_type }} (check mode) (skip_duplicates = no)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Update policy using document for {{ iam_type }} (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Delete policy A'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_a not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_name_a not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ # Update C with no_access.json
+ # Delete C
+
+ - name: 'Update policy using json for {{ iam_type }} (check mode) (skip_duplicates)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would not be updated for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Update policy using json for {{ iam_type }} (skip_duplicates)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was not updated for {{ iam_type }} (skip_duplicates)'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Update policy using json for {{ iam_type }} (check mode) (skip_duplicates = no)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Update policy using json for {{ iam_type }} (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Delete policy C'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_c not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_c not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ - name: 'Update policy using document for {{ iam_type }} (check mode)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: 'Update policy using document for {{ iam_type }}'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: 'Update policy using document for {{ iam_type }} (idempotency)'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: 'Delete policy B'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_b not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ - name: 'Update policy using json for {{ iam_type }} (check mode)'
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy would be updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: 'Update policy using json for {{ iam_type }}'
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert policy was updated for {{ iam_type }}'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: 'Update policy using json for {{ iam_type }} (idempotency)'
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert no change'
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ # ============================================================
+ - name: 'Delete policy D (check_mode)'
+ check_mode: yes
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert not deleted'
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: 'Delete policy D'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_name_d not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 0
+
+ - name: 'Delete policy D (test idempotency)'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is not changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 0
+
+ - name: 'Delete policy D (check_mode) (test idempotency)'
+ check_mode: yes
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+
+ - name: 'Assert deleted'
+ assert:
+ that:
+ - result is not changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 0
+
+ always:
+ # ============================================================
+ - name: 'Delete policy A for {{ iam_type }}'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ ignore_errors: yes
+
+ - name: 'Delete policy B for {{ iam_type }}'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ ignore_errors: yes
+
+ - name: 'Delete policy C for {{ iam_type }}'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ ignore_errors: yes
+
+ - name: 'Delete policy D for {{ iam_type }}'
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases
new file mode 100644
index 00000000..3d7a2c9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/aliases
@@ -0,0 +1,3 @@
+iam_role_info
+unsupported
+cloud/aws
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml
new file mode 100644
index 00000000..46db6050
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+test_role: '{{ resource_prefix }}-role'
+test_path: '/{{ resource_prefix }}/'
+safe_managed_policy: 'AWSDenyAll'
+custom_policy_name: '{{ resource_prefix }}-denyall'
+boundary_policy: 'arn:aws:iam::aws:policy/AWSDenyAll'
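+# Debugging toggles: when enabled, tasks/main.yml pauses briefly after
+# create/delete operations to work around boto3 occasionally returning
+# before a change is fully visible (see the Notes in tasks/main.yml).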
+paranoid_pauses: no
+standard_pauses: no
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json
new file mode 100644
index 00000000..ae62fd19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-a.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "*"
+ ],
+ "Effect": "Deny",
+ "Resource": "*",
+ "Sid": "DenyA"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json
new file mode 100644
index 00000000..3a4704a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all-b.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "*"
+ ],
+ "Effect": "Deny",
+ "Resource": "*",
+ "Sid": "DenyB"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json
new file mode 100644
index 00000000..3d324b9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-all.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "*"
+ ],
+ "Effect": "Deny",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json
new file mode 100644
index 00000000..73e87715
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/files/deny-assume.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": "sts:AssumeRole",
+ "Principal": { "Service": "ec2.amazonaws.com" },
+ "Effect": "Deny"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml
new file mode 100644
index 00000000..34c17af3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_role/tasks/main.yml
@@ -0,0 +1,1521 @@
+---
+# Tests for iam_role and iam_role_info
+#
+# Tests:
+# - Minimal Role creation
+# - Role deletion
+# - Fetching a specific role
+# - Creating roles w/ and w/o instance profiles
+# - Creating roles w/ a path
+# - Updating Max Session Duration
+# - Updating Description
+# - Managing list of managed policies
+# - Managing list of inline policies (for testing _info)
+# - Managing boundary policy
+#
+# Notes:
+# - Only tests *documented* return values (RESULT.iam_role)
+# - There are some known timing issues with boto3 returning before actions
+#   complete. If you hit problems with "changed" status, it's worth enabling
+#   the standard_pauses and paranoid_pauses options as a first step in
+#   debugging.
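+#
+# For example, one way to chase a flaky "changed" result is to enable the
+# pauses via this role's defaults:
+#
+#   standard_pauses: yes
+#   paranoid_pauses: yes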
+#
+# Possible Bugs:
+# - Fails to delete role if inline policies not removed first
+
+- name: 'Setup AWS connection info'
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ iam_role:
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ collections:
+ - amazon.aws
+ block:
+ # ===================================================================
+ # Parameter Checks
+ - name: 'Friendly message when creating an instance profile and attaching a boundary policy'
+ iam_role:
+ name: '{{ test_role }}'
+ boundary: '{{ boundary_policy }}'
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"boundary policy" in iam_role.msg'
+ - '"create_instance_profile" in iam_role.msg'
+ - '"false" in iam_role.msg'
+
+ - name: 'Friendly message when boundary policy is not an ARN'
+ iam_role:
+ name: '{{ test_role }}'
+ boundary: 'AWSDenyAll'
+ create_instance_profile: no
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"Boundary policy" in iam_role.msg'
+ - '"ARN" in iam_role.msg'
+
+ - name: 'Friendly message when "present" without assume_role_policy_document'
+ module_defaults: { iam_role: {} }
+ iam_role:
+ name: '{{ test_role }}'
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - 'iam_role.msg.startswith("state is present but all of the following are missing")'
+ - '"assume_role_policy_document" in iam_role.msg'
+
+ - name: 'Maximum Session Duration needs to be between 1 and 12 hours (too short)'
+ iam_role:
+ name: '{{ test_role }}'
+ max_session_duration: 3599
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"max_session_duration must be between" in iam_role.msg'
+
+ - name: 'Maximum Session Duration needs to be between 1 and 12 hours (too long)'
+ iam_role:
+ name: '{{ test_role }}'
+ max_session_duration: 43201
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"max_session_duration must be between" in iam_role.msg'
+
+ - name: 'Role Paths must start with /'
+ iam_role:
+ name: '{{ test_role }}'
+ path: 'test/'
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"path must begin and end with /" in iam_role.msg'
+
+ - name: 'Role Paths must end with /'
+ iam_role:
+ name: '{{ test_role }}'
+ path: '/test'
+ register: iam_role
+ ignore_errors: yes
+ - assert:
+ that:
+ - iam_role is failed
+ - '"path must begin and end with /" in iam_role.msg'
+
+ # ===================================================================
+ # Supplemental resource pre-creation
+ - name: 'Create Safe IAM Managed Policy'
+ iam_managed_policy:
+ state: present
+ policy_name: '{{ custom_policy_name }}'
+ policy_description: "A safe (deny-all) managed policy"
+ policy: "{{ lookup('file', 'deny-all.json') }}"
+ register: create_managed_policy
+ - assert:
+ that:
+ - create_managed_policy is succeeded
+
+ # ===================================================================
+ # Rapid Role Creation and deletion
+ - name: 'Try running some rapid-fire create/delete tests'
+ # We've previously seen issues with iam_role returning before creation is
+ # actually complete. If we think that issue is gone, try creating and
+ # deleting things in quick succession.
+ when: not (standard_pauses | bool)
+ block:
+ - name: 'Minimal IAM Role without instance profile (rapid)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role
+ - name: 'Minimal IAM Role without instance profile (rapid)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role_again
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role_again is not changed
+ - name: 'Remove IAM Role (rapid)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ register: iam_role
+ - name: 'Remove IAM Role (rapid)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ register: iam_role_again
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role_again is not changed
+
+ - name: 'Minimal IAM Role without instance profile (rapid)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role
+ - name: 'Remove IAM Role (rapid)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ register: iam_role_again
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role_again is changed
+
+ # ===================================================================
+ # Role Creation
+ # (without Instance profile)
+ - name: 'iam_role_info before Role creation (no args)'
+ iam_role_info:
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+
+ - name: 'iam_role_info before Role creation (search for test role)'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ - name: 'Minimal IAM Role (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ # Pause this first time, just in case we actually created something...
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'iam_role_info after Role creation in check_mode'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ - name: 'Minimal IAM Role without instance profile'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - 'iam_role.iam_role.arn.startswith("arn")'
+ - 'iam_role.iam_role.arn.endswith("role/" + test_role )'
+ # Would be nice to test the contents...
+ - '"assume_role_policy_document" in iam_role.iam_role'
+ - iam_role.iam_role.attached_policies | length == 0
+ - iam_role.iam_role.max_session_duration == 3600
+ - iam_role.iam_role.path == '/'
+ - '"create_date" in iam_role.iam_role'
+ - '"role_id" in iam_role.iam_role'
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'Minimal IAM Role without instance profile (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after Role creation'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 0
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ - name: 'Remove IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: Short pause for role removal to finish
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ - name: 'iam_role_info after Role deletion'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ # (with path)
+ - name: 'Minimal IAM Role with path (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ path: '{{ test_path }}'
+ register: iam_role
+ check_mode: yes
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Minimal IAM Role with path'
+ iam_role:
+ name: '{{ test_role }}'
+ path: '{{ test_path }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - 'iam_role.iam_role.arn.startswith("arn")'
+ - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )'
+ # Would be nice to test the contents...
+ - '"assume_role_policy_document" in iam_role.iam_role'
+ - iam_role.iam_role.attached_policies | length == 0
+ - iam_role.iam_role.max_session_duration == 3600
+ - iam_role.iam_role.path == '{{ test_path }}'
+ - '"create_date" in iam_role.iam_role'
+ - '"role_id" in iam_role.iam_role'
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'Minimal IAM Role with path (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ path: '{{ test_path }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after Role creation'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - role_info.iam_roles[0].path == '{{ test_path }}'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ - name: 'iam_role_info after Role creation (searching a path)'
+ iam_role_info:
+ path_prefix: '{{ test_path }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].path == '{{ test_path }}'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ - name: 'Remove IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ path: '{{ test_path }}'
+ # If we don't delete the existing profile it'll be reused (with the path)
+ # by the test below.
+ delete_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: Short pause for role removal to finish
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ - name: 'iam_role_info after Role deletion'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ # (with Instance profile)
+ - name: 'Minimal IAM Role with instance profile'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - 'iam_role.iam_role.arn.startswith("arn")'
+ - 'iam_role.iam_role.arn.endswith("role/" + test_role )'
+ # Would be nice to test the contents...
+ - '"assume_role_policy_document" in iam_role.iam_role'
+ - iam_role.iam_role.attached_policies | length == 0
+ - iam_role.iam_role.max_session_duration == 3600
+ - iam_role.iam_role.path == '/'
+ - '"create_date" in iam_role.iam_role'
+ - '"role_id" in iam_role.iam_role'
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'Minimal IAM Role with instance profile (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after Role creation'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ # ===================================================================
+ # Max Session Duration Manipulation
+
+ - name: 'Update Max Session Duration (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ max_session_duration: 43200
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Update Max Session Duration'
+ iam_role:
+ name: '{{ test_role }}'
+ max_session_duration: 43200
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.max_session_duration == 43200
+
+ - name: 'Update Max Session Duration (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ max_session_duration: 43200
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after updating Max Session Duration'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ # ===================================================================
+ # Description Manipulation
+
+ - name: 'Add Description (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Add Description'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}'
+
+ - name: 'Add Description (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}'
+
+ - name: 'iam_role_info after adding Description'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+ - name: 'Update Description (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role (updated) {{ resource_prefix }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Update Description'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role (updated) {{ resource_prefix }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}'
+
+ - name: 'Update Description (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ description: 'Ansible Test Role (updated) {{ resource_prefix }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}'
+
+ - name: 'iam_role_info after updating Description'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
+
+ # ===================================================================
+ # Tag Manipulation
+
+ - name: 'Add Tag (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: ValueA
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Add Tag'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: ValueA
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - iam_role.iam_role.tags | length == 1
+ - '"TagA" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagA == "ValueA"
+
+ - name: 'Add Tag (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: ValueA
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagA" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagA == "ValueA"
+
+ - name: 'iam_role_info after adding Tags'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagA" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagA == "ValueA"
+
+ - name: 'Update Tag (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: AValue
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Update Tag'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: AValue
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagA" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagA == "AValue"
+
+ - name: 'Update Tag (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ tags:
+ TagA: AValue
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagA" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagA == "AValue"
+
+ - name: 'iam_role_info after updating Tag'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagA" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagA == "AValue"
+
+ - name: 'Add second Tag without purge (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: no
+ tags:
+ TagB: ValueB
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Add second Tag without purge'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: no
+ tags:
+ TagB: ValueB
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagB" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagB == "ValueB"
+
+ - name: 'Add second Tag without purge (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: no
+ tags:
+ TagB: ValueB
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagB" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagB == "ValueB"
+
+ - name: 'iam_role_info after adding second Tag without purge'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 2
+ - '"TagA" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagA == "AValue"
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+ - name: 'Purge first tag (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: yes
+ tags:
+ TagB: ValueB
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Purge first tag'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: yes
+ tags:
+ TagB: ValueB
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagB" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagB == "ValueB"
+
+ - name: 'Purge first tag (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_tags: yes
+ tags:
+ TagB: ValueB
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+ - '"TagB" in iam_role.iam_role.tags'
+ - iam_role.iam_role.tags.TagB == "ValueB"
+
+ - name: 'iam_role_info after purging first Tag'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagA" not in role_info.iam_roles[0].tags'
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+
+ # ===================================================================
+ # Policy Manipulation
+
+ - name: 'Add Managed Policy (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Add Managed Policy'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'Add Managed Policy (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after adding Managed Policy'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 1
+ - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - custom_policy_name not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+ - name: 'Update Managed Policy without purge (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Update Managed Policy without purge'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'Update Managed Policy without purge (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ purge_policies: no
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after updating Managed Policy without purge'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 2
+ - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+ # Managed Policies are purged by default
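+ # (so the tasks below, which omit purge_policies, behave as if
+ # purge_policies: yes had been set explicitly)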
+ - name: 'Update Managed Policy with purge (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Update Managed Policy with purge'
+ iam_role:
+ name: '{{ test_role }}'
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'Update Managed Policy with purge (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ managed_policy:
+ - '{{ custom_policy_name }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after updating Managed Policy with purge'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 1
+ - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+ # ===================================================================
+ # Inline Policy (test _info behaviour)
+
+ # XXX Not sure if it's a bug in Ansible or a "quirk" of AWS, but these two
+ # policies need to have at least different Sids or the second doesn't show
+ # up...
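+ # (deny-all-a.json and deny-all-b.json differ only in their Sid values,
+ # for exactly this reason)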
+
+ - name: 'Attach inline policy a'
+ iam_policy:
+ state: present
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-a'
+ policy_json: '{{ lookup("file", "deny-all-a.json") }}'
+
+ - name: 'Attach inline policy b'
+ iam_policy:
+ state: present
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-b'
+ policy_json: '{{ lookup("file", "deny-all-b.json") }}'
+
+ - name: 'iam_role_info after attaching inline policies (using iam_policy)'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 2
+ - '"inline-policy-a" in role_info.iam_roles[0].inline_policies'
+ - '"inline-policy-b" in role_info.iam_roles[0].inline_policies'
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
+ - role_info.iam_roles[0].managed_policies | length == 1
+ - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == '/'
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 1
+ - '"TagB" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+ # XXX iam_role fails to remove inline policies before deleting the role
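+ # so detach them explicitly with iam_policy before the role is removed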
+ - name: 'Detach inline policy a'
+ iam_policy:
+ state: absent
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-a'
+
+ - name: 'Detach inline policy b'
+ iam_policy:
+ state: absent
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-b'
+
+ # ===================================================================
+ # Role Removal
+ - name: 'Remove IAM Role (CHECK MODE)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: 'Short pause for role removal to finish'
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ - name: 'iam_role_info after deleting role in check mode'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+
+ - name: 'Remove IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: 'Short pause for role removal to finish'
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ - name: 'iam_role_info after deleting role'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ - name: 'Remove IAM Role (should be gone already)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - name: 'Short pause for role removal to finish'
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ # ===================================================================
+ # Boundary Policy (requires create_instance_profile: no)
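+ # (as the parameter checks above showed, iam_role refuses to combine a
+ # boundary policy with instance profile creation)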
+ - name: 'Create minimal role with no boundary policy'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'Configure Boundary Policy (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ boundary: '{{ boundary_policy }}'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+
+ - name: 'Configure Boundary Policy'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ boundary: '{{ boundary_policy }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'Configure Boundary Policy (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ create_instance_profile: no
+ boundary: '{{ boundary_policy }}'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after adding boundary policy'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 0
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - role_info.iam_roles[0].path == '/'
+ - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy
+ - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+
+ - name: 'Remove IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: Short pause for role removal to finish
+ pause:
+ seconds: 10
+ when: paranoid_pauses | bool
+
+ # ===================================================================
+ # Complex role Creation
+ - name: 'Complex IAM Role (CHECK MODE)'
+ iam_role:
+ name: '{{ test_role }}'
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ boundary: '{{ boundary_policy }}'
+ create_instance_profile: no
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ - '{{ custom_policy_name }}'
+ max_session_duration: 43200
+ path: '{{ test_path }}'
+ tags:
+ TagA: 'ValueA'
+ check_mode: yes
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'iam_role_info after Complex Role creation in check_mode'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 0
+
+ - name: 'Complex IAM Role'
+ iam_role:
+ name: '{{ test_role }}'
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ boundary: '{{ boundary_policy }}'
+ create_instance_profile: no
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ - '{{ custom_policy_name }}'
+ max_session_duration: 43200
+ path: '{{ test_path }}'
+ tags:
+ TagA: 'ValueA'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is changed
+ - iam_role.iam_role.role_name == test_role
+ - 'iam_role.iam_role.arn.startswith("arn")'
+ - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )'
+ # Would be nice to test the contents...
+ - '"assume_role_policy_document" in iam_role.iam_role'
+ - iam_role.iam_role.attached_policies | length == 2
+ - iam_role.iam_role.max_session_duration == 43200
+ - iam_role.iam_role.path == test_path
+ - '"create_date" in iam_role.iam_role'
+ - '"role_id" in iam_role.iam_role'
+ - name: Short pause for role creation to finish
+ pause:
+ seconds: 10
+ when: standard_pauses | bool
+
+ - name: 'Complex IAM role (no change)'
+ iam_role:
+ name: '{{ test_role }}'
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ boundary: '{{ boundary_policy }}'
+ create_instance_profile: no
+ description: 'Ansible Test Role {{ resource_prefix }}'
+ managed_policy:
+ - '{{ safe_managed_policy }}'
+ - '{{ custom_policy_name }}'
+ max_session_duration: 43200
+ path: '{{ test_path }}'
+ tags:
+ TagA: 'ValueA'
+ register: iam_role
+ - assert:
+ that:
+ - iam_role is not changed
+ - iam_role.iam_role.role_name == test_role
+
+ - name: 'iam_role_info after Role creation'
+ iam_role_info:
+ name: '{{ test_role }}'
+ register: role_info
+ - assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - 'role_info.iam_roles[0].arn.startswith("arn")'
+ - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 0
+ - role_info.iam_roles[0].managed_policies | length == 2
+ - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
+ - role_info.iam_roles[0].max_session_duration == 43200
+ - role_info.iam_roles[0].path == test_path
+ - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy
+ - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy'
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - '"TagA" in role_info.iam_roles[0].tags'
+ - role_info.iam_roles[0].tags.TagA == "ValueA"
+
+ always:
+ # ===================================================================
+ # Cleanup
+
+ # XXX iam_role fails to remove inline policies before deleting the role
+ - name: 'Detach inline policy a'
+ iam_policy:
+ state: absent
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-a'
+ ignore_errors: true
+
+ - name: 'Detach inline policy b'
+ iam_policy:
+ state: absent
+ iam_type: 'role'
+ iam_name: '{{ test_role }}'
+ policy_name: 'inline-policy-b'
+ ignore_errors: true
+
+ - name: 'Remove IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ delete_instance_profile: yes
+ ignore_errors: true
+
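+ # The role may have been created under the custom path, so attempt removal
+ # both without and with the path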
+ - name: 'Remove IAM Role (with path)'
+ iam_role:
+ state: absent
+ name: '{{ test_role }}'
+ path: '{{ test_path }}'
+ delete_instance_profile: yes
+ ignore_errors: true
+
+ - name: 'iam_role_info after Role deletion'
+ iam_role_info:
+ name: '{{ test_role }}'
+ ignore_errors: true
+
+ - name: 'Remove test managed policy'
+ iam_managed_policy:
+ state: absent
+ policy_name: '{{ custom_policy_name }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example1.xml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example1.xml
new file mode 100644
index 00000000..fa2130a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example1.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" validUntil="2019-08-24T20:37:21Z" cacheDuration="PT1567111041S" entityID="AnsibleSAMLTest1">
+ <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+ <md:KeyDescriptor use="signing">
+ <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+ <ds:X509Data>
+ <ds:X509Certificate>MIIDJjCCAg4CCQCiwst2XYH7fTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMREwDwYDVQQDDAhleGFtcGxlMTAeFw0xOTA4MjIyMDM2NTFaFw0yMDA4MjEyMDM2NTFaMFUxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxETAPBgNVBAMMCGV4YW1wbGUxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArLbBVE6E28bfvB/gUGjOmY2lxxxLZ9Fls4fOH9js/MhGG+hh4diyj/Kb7Coo6HehXMp93TXkYYbiKGAoykT6ULEACZnYi1V9XdUs619ibumi9pRSFygBrbyN+n9peMJxf4jvM1QS/DTPWxdkgeMkqb2SARJChd3azCHd0cdGwcsx1pTkYp34SL0gP79m6W8N3TIxyJmqi0Kc7mntPQUCVH/wFSyg59JXo8SUQDQNap/yd9UwLzxP9MhH8G3DBatwQj3ijYOPnAeUPbsw7GYiKQBh/SIH5DGzW4TNHo0PiQJqzymNp0mI0eKjRO98vfnsXkeQwotzeKVbkmJ63h3PHQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBvm+zYchto1NESDxCVDK96QKObklWrfiAgKDLb49Loox+pyWTvs2mu5DOgDe0rrgEDfxngbbupo9eSu5w7OPVfip8W9rsB8k6ak+P4G8MltqkYv5A0aXbka1da1NenbIXAC3/YbMjLnsidDWQiKYZ0i0HxjuhguW3lvOFd3Dzp2rNDydzA6ilSmBXFrAcKm0RHAfP4NGy3ECdU6SQ5OBSUcJprKADMODIykuds1qh0Gz8a0ukKKmp2yJvz9bIuC4+TRXKKZtgDZKPcN0MgtqYZJ2rttoFqkCWrNBCZSUgJEASUJ78NSC3Wy8WQr3NjZvQ86KG2/mcVQ3Lm1ci82Uue</ds:X509Certificate>
+ </ds:X509Data>
+ </ds:KeyInfo>
+ </md:KeyDescriptor>
+ <md:KeyDescriptor use="encryption">
+ <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+ <ds:X509Data>
+ <ds:X509Certificate>MIIDJjCCAg4CCQCiwst2XYH7fTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMREwDwYDVQQDDAhleGFtcGxlMTAeFw0xOTA4MjIyMDM2NTFaFw0yMDA4MjEyMDM2NTFaMFUxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxETAPBgNVBAMMCGV4YW1wbGUxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArLbBVE6E28bfvB/gUGjOmY2lxxxLZ9Fls4fOH9js/MhGG+hh4diyj/Kb7Coo6HehXMp93TXkYYbiKGAoykT6ULEACZnYi1V9XdUs619ibumi9pRSFygBrbyN+n9peMJxf4jvM1QS/DTPWxdkgeMkqb2SARJChd3azCHd0cdGwcsx1pTkYp34SL0gP79m6W8N3TIxyJmqi0Kc7mntPQUCVH/wFSyg59JXo8SUQDQNap/yd9UwLzxP9MhH8G3DBatwQj3ijYOPnAeUPbsw7GYiKQBh/SIH5DGzW4TNHo0PiQJqzymNp0mI0eKjRO98vfnsXkeQwotzeKVbkmJ63h3PHQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBvm+zYchto1NESDxCVDK96QKObklWrfiAgKDLb49Loox+pyWTvs2mu5DOgDe0rrgEDfxngbbupo9eSu5w7OPVfip8W9rsB8k6ak+P4G8MltqkYv5A0aXbka1da1NenbIXAC3/YbMjLnsidDWQiKYZ0i0HxjuhguW3lvOFd3Dzp2rNDydzA6ilSmBXFrAcKm0RHAfP4NGy3ECdU6SQ5OBSUcJprKADMODIykuds1qh0Gz8a0ukKKmp2yJvz9bIuC4+TRXKKZtgDZKPcN0MgtqYZJ2rttoFqkCWrNBCZSUgJEASUJ78NSC3Wy8WQr3NjZvQ86KG2/mcVQ3Lm1ci82Uue</ds:X509Certificate>
+ </ds:X509Data>
+ </ds:KeyInfo>
+ </md:KeyDescriptor>
+ <md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/logout"/>
+ <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
+ <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/"/>
+ </md:IDPSSODescriptor>
+</md:EntityDescriptor>
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example2.xml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example2.xml
new file mode 100644
index 00000000..76a86c7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/files/example2.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" validUntil="2019-08-24T20:38:34Z" cacheDuration="PT1567111114S" entityID="AnsibleSAMLTest2">
+ <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+ <md:KeyDescriptor use="signing">
+ <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+ <ds:X509Data>
+ <ds:X509Certificate>MIIDADCCAegCCQCgxBiDM2muazANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMB4XDTE5MDgyMjIwMzY1OFoXDTIwMDgyMTIwMzY1OFowQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMUJ3J1tzqoAgQwaJHx/MGl5yVTNpJLPfx8YCS0Z+RQWXIazZrssy/tpZcfgnek4+xvqrzRXR4nell31VTojIGItqR70lPhrsPES70SrN8egi+MLTZ4iddG5hjK4bn4wss88/3johi8/J85wc26/bkRz66lOvTaJ8k1pncQ3NekT9zZzWlW1LQk3uMbaPrVVocjFBEZyTsYUE9wZG+ggRBJlOMGEdhGsgPuR8Aj7OXO7X8/RolV8lB3GTzellX2GxiWnOhjnabSPBUUv5iVKcDOb2lIqxr5DScIvX1PcJSUCAGGLcd8wYK/lh3k+PFH9QNDLY6F5WHkoZq9LS46+8lkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAWjX7E/BYAHaOKOXc5RAD9zwAaMxLMTSK5Cnq32TGIh1P4ap8jTNVaiCs9UJXHJpKwXUN+3DdVBIGMT17DzFwAeruZOzNBN0VJVl0yZ6dARgss4gpOBGvBD8blLidnVxEd5VRGldx5R5+I441ms6ASkohcHhGlF4WGbnabEZ/MtxhDIWUX2w4naOfFg6vOiPsE1C/ZXJeLDNP+dnjfueTN5DD38d+ND2mHweB7u0Qjpkd2K0TuCp0z4kXRuTgPzlfkPORNkgyU1hA3YClpT57aeUsHgO23sr/4d04jzI+hYeleGqjNM+3vDQYsOQyXx61/nANeF0Sp9ZIv3eJSTMXNw==</ds:X509Certificate>
+ </ds:X509Data>
+ </ds:KeyInfo>
+ </md:KeyDescriptor>
+ <md:KeyDescriptor use="encryption">
+ <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+ <ds:X509Data>
+ <ds:X509Certificate>MIIDADCCAegCCQCgxBiDM2muazANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMB4XDTE5MDgyMjIwMzY1OFoXDTIwMDgyMTIwMzY1OFowQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMUJ3J1tzqoAgQwaJHx/MGl5yVTNpJLPfx8YCS0Z+RQWXIazZrssy/tpZcfgnek4+xvqrzRXR4nell31VTojIGItqR70lPhrsPES70SrN8egi+MLTZ4iddG5hjK4bn4wss88/3johi8/J85wc26/bkRz66lOvTaJ8k1pncQ3NekT9zZzWlW1LQk3uMbaPrVVocjFBEZyTsYUE9wZG+ggRBJlOMGEdhGsgPuR8Aj7OXO7X8/RolV8lB3GTzellX2GxiWnOhjnabSPBUUv5iVKcDOb2lIqxr5DScIvX1PcJSUCAGGLcd8wYK/lh3k+PFH9QNDLY6F5WHkoZq9LS46+8lkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAWjX7E/BYAHaOKOXc5RAD9zwAaMxLMTSK5Cnq32TGIh1P4ap8jTNVaiCs9UJXHJpKwXUN+3DdVBIGMT17DzFwAeruZOzNBN0VJVl0yZ6dARgss4gpOBGvBD8blLidnVxEd5VRGldx5R5+I441ms6ASkohcHhGlF4WGbnabEZ/MtxhDIWUX2w4naOfFg6vOiPsE1C/ZXJeLDNP+dnjfueTN5DD38d+ND2mHweB7u0Qjpkd2K0TuCp0z4kXRuTgPzlfkPORNkgyU1hA3YClpT57aeUsHgO23sr/4d04jzI+hYeleGqjNM+3vDQYsOQyXx61/nANeF0Sp9ZIv3eJSTMXNw==</ds:X509Certificate>
+ </ds:X509Data>
+ </ds:KeyInfo>
+ </md:KeyDescriptor>
+ <md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/logout"/>
+ <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
+ <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/"/>
+ </md:IDPSSODescriptor>
+</md:EntityDescriptor>
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml
new file mode 100644
index 00000000..5c662f56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_saml_federation/tasks/main.yml
@@ -0,0 +1,89 @@
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+ block:
+ # ============================================================
+ # TESTS
+ - name: Create the identity provider
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: present
+ saml_metadata_document: '{{ lookup("file", "example1.xml") }}'
+ register: create_result
+
+ - name: assert idp created
+ assert:
+ that:
+ - create_result is changed
+
+ - name: Test that nothing changes when we retry
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: present
+ saml_metadata_document: '{{ lookup("file", "example1.xml") }}'
+ register: create_result
+
+ - name: assert the idp doesn't change when we retry
+ assert:
+ that:
+ - create_result is not changed
+
+ - name: Change the identity provider
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: present
+ saml_metadata_document: '{{ lookup("file", "example2.xml") }}'
+ register: change_result
+
+ - name: assert idp changed
+ assert:
+ that:
+ - change_result is changed
+
+ - name: Test that nothing changes when we retry
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: present
+ saml_metadata_document: '{{ lookup("file", "example2.xml") }}'
+ register: change_result
+
+ - name: assert the idp doesn't change when we retry
+ assert:
+ that:
+ - change_result is not changed
+
+ - name: Delete the identity provider
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: absent
+ register: destroy_result
+
+ - name: assert deleted
+ assert:
+ that:
+ - destroy_result is changed
+
+ - name: Attempt to re-delete the identity provider
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: absent
+ register: destroy_result
+
+ - name: assert no change when re-deleting
+ assert:
+ that:
+ - destroy_result is not changed
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting the identity provider
+ iam_saml_federation:
+ name: '{{ resource_prefix }}-saml'
+ state: absent
+ register: destroy_result
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/aliases
new file mode 100644
index 00000000..c7a4b8ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+iam_user_info
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/defaults/main.yml
new file mode 100644
index 00000000..8a69ca09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+test_group: '{{ resource_prefix }}-group'
+test_path: '/'
+test_user: '{{ test_users[0] }}'
+test_users:
+ - '{{ resource_prefix }}-user-a'
+ - '{{ resource_prefix }}-user-b'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/tasks/main.yml
new file mode 100644
index 00000000..c5be49ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/iam_user/tasks/main.yml
@@ -0,0 +1,482 @@
+---
+- name: set up aws connection info
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ collections:
+ - amazon.aws
+ block:
+ - name: ensure improper usage of parameters fails gracefully
+ iam_user_info:
+ path: '{{ test_path }}'
+ group: '{{ test_group }}'
+ ignore_errors: yes
+ register: iam_user_info_path_group
+ - assert:
+ that:
+ - iam_user_info_path_group is failed
+ - 'iam_user_info_path_group.msg == "parameters are mutually exclusive: group|path"'
+
+ - name: ensure exception handling fails as expected
+ iam_user_info:
+ region: 'bogus'
+ path: ''
+ ignore_errors: yes
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info is failed
+ - '"user" in iam_user_info.msg'
+
+ - name: ensure exception handling fails as expected with group
+ iam_user_info:
+ region: 'bogus'
+ group: '{{ test_group }}'
+ ignore_errors: yes
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info is failed
+ - '"group" in iam_user_info.msg'
+
+ - name: ensure exception handling fails as expected with default path
+ iam_user_info:
+ region: 'bogus'
+ ignore_errors: yes
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info is failed
+ - '"path" in iam_user_info.msg'
+
+ - name: create test user (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ check_mode: yes
+ register: iam_user
+
+ - name: assert that the user would be created
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: create test user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+
+ - name: assert that the user is created
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: ensure test user exists (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+
+ - name: assert that the user wasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: ensure the info used to validate other tests is valid
+ set_fact:
+ test_iam_user: '{{ iam_user.iam_user.user }}'
+ - assert:
+ that:
+ - 'test_iam_user.arn.startswith("arn:aws:iam")'
+ - 'test_iam_user.arn.endswith("user/" + test_user )'
+ - test_iam_user.create_date is not none
+ - test_iam_user.path == test_path
+ - test_iam_user.user_id is not none
+ - test_iam_user.user_name == test_user
+
+ - name: get info on IAM user(s)
+ iam_user_info:
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length != 0
+
+ - name: get info on IAM user(s) with name
+ iam_user_info:
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - debug: var=iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+
+ - name: get info on IAM user(s) on path
+ iam_user_info:
+ path: '{{ test_path }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+
+ # ===========================================
+ # Test Managed Policy management
+ #
+ # Use a couple of benign policies for testing:
+ # - AWSDenyAll
+ # - ServiceQuotasReadOnlyAccess
+ #
+ - name: attach managed policy to user (check mode)
+ check_mode: yes
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+
+ - name: assert that the user is changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: attach managed policy to user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+
+ - name: assert that the user is changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: ensure managed policy is attached to user (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: attach different managed policy to user (check mode)
+ check_mode: yes
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: attach different managed policy to user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: Check first policy wasn't purged
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ purge_policy: no
+ register: iam_user
+
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Check that managed policy order doesn't matter
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Check that policy doesn't require full ARN path
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - AWSDenyAll
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Remove one of the managed policies - with purge (check mode)
+ check_mode: yes
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: Remove one of the managed policies - with purge
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: Check we only have the one policy attached
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: ensure group exists
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+
+ - assert:
+ that:
+ - iam_group.changed
+ - iam_group.iam_group.users
+
+ - name: get info on IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+
+ - name: remove user from group
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: True
+ users: []
+ state: present
+ register: iam_group
+
+ - name: get info on IAM user(s) after removing from group
+ iam_user_info:
+ group: '{{ test_group }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+
+ - name: assert an empty list of users for the group is returned
+ assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: ensure ansible users exist
+ iam_user:
+ name: '{{ item }}'
+ state: present
+ with_items: '{{ test_users }}'
+
+ - name: get info on multiple IAM user(s)
+ iam_user_info:
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length != 0
+
+ - name: ensure multiple user group exists with single user
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+
+ - name: get info on IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+
+ - name: add all users to group
+ iam_group:
+ name: '{{ test_group }}'
+ users: '{{ test_users }}'
+ state: present
+ register: iam_group
+
+ - name: get info on multiple IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == test_users | length
+
+ - name: purge users from group
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: True
+ users: []
+ state: present
+ register: iam_group
+
+ - name: ensure info is empty for empty group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: get info on IAM user(s) after removing from group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+
+ - name: assert an empty list of users for the group is returned
+ assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ register: iam_group
+
+ - name: assert that group was removed
+ assert:
+ that:
+ - iam_group.changed
+ - iam_group
+
+ - name: Test remove group again (idempotency)
+ iam_group:
+ name: "{{ test_group }}"
+ state: absent
+ register: iam_group
+
+ - name: assert that group remove is not changed
+ assert:
+ that:
+ - not iam_group.changed
+
+ - name: Remove user with attached policy
+ iam_user:
+ name: "{{ test_user }}"
+ state: absent
+ register: iam_user
+
+ - name: get info on IAM user(s) after deleting
+ iam_user_info:
+ group: '{{ test_user }}'
+ ignore_errors: yes
+ register: iam_user_info
+
+ - name: Assert user was removed
+ assert:
+ that:
+ - iam_user.changed
+ - "'cannot be found' in iam_user_info.msg"
+
+ - name: Remove user with attached policy (idempotent)
+ iam_user:
+ name: "{{ test_user }}"
+ state: absent
+ ignore_errors: yes
+ register: iam_user
+
+ - name: Assert user was removed
+ assert:
+ that:
+ - not iam_user.changed
+
+ always:
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove ansible users
+ iam_user:
+ name: '{{ item }}'
+ state: absent
+ with_items: '{{ test_users }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/defaults/main.yml
new file mode 100644
index 00000000..bdaddb34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+kinesis_stream_name: '{{ resource_prefix }}'
+
+kms_cmk_alias_1: '{{ resource_prefix }}-1'
+kms_cmk_alias_2: '{{ resource_prefix }}-2'
+
+# A variety of camelCase and PascalCase to test things don't get re-cased
+# underneath us
+kinesis_stream_tags_1:
+ tag: value
+ AnExample: AValue
+ somethingElse: Another Value
+ Bleep: bloop
+# Adds two values, deletes two, and keeps one value
+kinesis_stream_tags_2:
+ tag: value
+ foo: Bar
+ Baz: quuX
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml
new file mode 100644
index 00000000..c55d16af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/main.yml
@@ -0,0 +1,713 @@
+---
+# ============================================================
+# Known issues:
+#
+# - (CM) check_mode returns changed (always?)
+# - (CM_snake) check_mode returns keys and values that don't directly
+# map to those from non-check_mode
+# - (Tag_snake) tag keys get snake_cased in return values
+# - (Tag_changed) changing tags doesn't return changed
+# - (Enc_snake) return values don't get snake_cased when updating encryption
+# - (Enc_disable) disabling encryption requires key and type to be set
+# - (Enc_idemp) Updating encryption settings isn't idempotent
+#
+# ============================================================
+- name: 'Setup AWS Module Defaults'
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ kinesis_stream:
+ # Number of shards is mandatory when state=present
+ shards: 1
+
+ block:
+ # ============================================================
+ # Set up some additional resources for later use
+
+ - name: 'KMS test preparation - only run when explicitly enabled'
+ when:
+ - run_kms_tests | default(False) | bool
+ block:
+ # KMS Keys
+ # Note: Because we're not a producer / consumer we don't actually need
+ # access to the keys
+ - name: 'Create KMS key 1'
+ aws_kms:
+ alias: '{{ kms_cmk_alias_1 }}'
+ state: present
+ enabled: yes
+ register: create_kms_1
+ - name: 'Create KMS key 2'
+ aws_kms:
+ alias: '{{ kms_cmk_alias_2 }}'
+ state: present
+ enabled: yes
+ register: create_kms_2
+ - name: 'Assert that we successfully created our keys'
+ assert:
+ that:
+ - create_kms_1 is success
+ - create_kms_2 is success
+ - name: 'Store the Key IDs for later'
+ set_fact:
+ kms_cmk_id_1: '{{ create_kms_1.key_id }}'
+ kms_cmk_arn_1: '{{ create_kms_1.key_arn }}'
+ kms_cmk_id_2: '{{ create_kms_2.key_id }}'
+ kms_cmk_arn_2: '{{ create_kms_2.key_arn }}'
+ # All of the valid ways to describe the CMK
+ kms_cmk_1:
+ - '{{ create_kms_1.key_id }}'
+ - 'alias/{{ kms_cmk_alias_1 }}'
+ - '{{ create_kms_1.key_arn }}'
+ kms_cmk_2:
+ - '{{ create_kms_2.key_id }}'
+ - 'alias/{{ kms_cmk_alias_2 }}'
+ - '{{ create_kms_2.key_arn }}'
+
+ # ============================================================
+ # Basic creation
+ - name: 'Create a basic Kinesis stream (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ register: result
+ - name: 'Assert state is changed when first creating a stream (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Create a basic Kinesis stream'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ register: result
+ - name: 'Assert state is changed when first creating a stream'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 24
+ - result.stream_arn.startswith('arn:aws:kinesis:')
+ - result.stream_arn.endswith(':stream/' + kinesis_stream_name)
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - result.tags == {}
+
+ # We've already tested that the ARN matches the expected pattern, so from here
+ # on we only test that it doesn't change.
+ - name: 'Save Stream ARN for later comparison'
+ set_fact:
+ kinesis_stream_arn: '{{ result.stream_arn }}'
+
+ - name: 'Create a basic Kinesis stream - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ register: result
+ - name: 'Assert state is not changed when re-running the create (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Create a basic Kinesis stream - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ register: result
+ - name: 'Assert state is not changed when re-running the create'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 24
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - result.tags == {}
+
+ # ============================================================
+ # Retention Period
+ #
+ - name: 'Increase the retention period (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 72
+ register: result
+ - name: 'Assert state is changed when changing the retention period (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Increase the retention period'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 72
+ register: result
+ - name: 'Assert state is changed when changing the retention period'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 72
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - result.tags == {}
+
+ - name: 'Increase the retention period - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 72
+ register: result
+ - name: 'Assert state is not changed when not changing the retention period (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Increase the retention period - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 72
+ register: result
+ - name: 'Assert state is not changed when not changing the retention period'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 72
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - result.tags == {}
+
+ - name: 'Decrease the retention period (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 48
+ register: result
+ - name: 'Assert state is changed when changing the retention period (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Decrease the retention period'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 48
+ register: result
+ - name: 'Assert state is changed when changing the retention period'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - result.tags == {}
+
+ - name: 'Decrease the retention period - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 48
+ register: result
+ - name: 'Assert state is not changed when not changing the retention period (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Decrease the retention period - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ retention_period: 48
+ register: result
+ - name: 'Assert state is not changed when not changing the retention period'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+
+ # ============================================================
+ # Basic tagging
+
+ - name: 'Set some tags (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_1 }}'
+ register: result
+ - name: 'Assert state is changed when adding tags (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Set some tags'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_1 }}'
+ register: result
+ - name: 'Assert state is changed when adding tags'
+ assert:
+ that:
+ - result is success
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ - name: 'Assert tags return as expected'
+ assert:
+ that:
+ - result is changed
+ - result.tags == kinesis_stream_tags_1
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Set some tags - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_1 }}'
+ register: result
+ - name: 'Assert state is not changed when not changing the tags (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Set some tags - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_1 }}'
+ register: result
+ - name: 'Assert state is not changed when not changing the tags'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ # Merge this into the main assertion when the return values are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_1
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ - name: 'Change some tags (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_2 }}'
+ register: result
+ - name: 'Assert state is changed when changing tags (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Change some tags'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_2 }}'
+ register: result
+ - name: 'Assert state is changed when changing tags'
+ assert:
+ that:
+ - result is success
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ # Merge this into the main assertion when the return values are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result is changed
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_changed) (Tag_snake)
+ ignore_errors: yes
+
+ - name: 'Change some tags - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_2 }}'
+ register: result
+ - name: 'Assert state is not changed when not changing the tags (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Change some tags - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ tags: '{{ kinesis_stream_tags_2 }}'
+ register: result
+ - name: 'Assert state is not changed when not changing the tags'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ # Merge this into the main assertion when the return values are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ # ============================================================
+ # Number of shards
+ #
+ - name: 'Change the number of shards (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ shards: 2
+ register: result
+ - name: 'Assert state is changed when changing the number of shards (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Change the number of shards'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ shards: 2
+ register: result
+ - name: 'Assert state is changed when changing the number of shards'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 2
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # Merge this into the main assertion when the tag keys are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ - name: 'Change the number of shards - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ shards: 2
+ register: result
+ - name: 'Assert state is not changed when not changing the number of shards (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Change the number of shards - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ shards: 2
+ register: result
+ - name: 'Assert state is not changed when not changing the number of shards'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 2
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # Merge this into the main assertion when the tag keys are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ # ============================================================
+ # Because shards always has to be passed, we can't test that it's left
+ # unchanged when we don't set it. Reset it to the value from module_defaults.
+
+ - name: 'Reset the number of shards'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ register: result
+ - name: 'Assert the change was successful'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.open_shards_count == 1
+
+ # DISABLED BY DEFAULT - KMS key creation/deletion not supported in CI at this time
+ - name: 'KMS tests - only run when explicitly enabled'
+ when:
+ - run_kms_tests | default(False) | bool
+ block:
+ # ============================================================
+ # Encryption
+ - name: 'Test encryption'
+ vars:
+ key_type: '{{ item.type }}'
+ kinesis_key: '{{ item.key }}'
+ kinesis_key_id: '{{ kms_cmk_id_1 }}'
+ kinesis_key_alias: 'alias/{{ kms_cmk_alias_1 }}'
+ kinesis_key_arn: '{{ kms_cmk_arn_1 }}'
+ include_tasks: 'test_encryption.yml'
+ # Loop through and test the management and idempotency when using the
+ # various combinations of ID, alias and ARN of a CMK
+ loop:
+ - type: 'ID'
+ key: '{{ kms_cmk_id_1 }}'
+ - type: 'Alias'
+ key: 'alias/{{ kms_cmk_alias_1 }}'
+ - type: 'ARN'
+ key: '{{ kms_cmk_arn_1 }}'
+
+ - name: 'Disable encryption - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'disabled'
+ register: result
+ - name: 'Assert state is not changed when encryption_state not changed (CHECK_MODE)'
+ ignore_errors: yes
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_idemp)
+
+ - name: 'Disable encryption - Idempotency'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'disabled'
+ register: result
+ - name: 'Assert state is not changed when encryption_state not changed'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_idemp)
+ ignore_errors: yes
+ # Merge this into the main assertion when the main return keys are
+ # snake_cased
+ - name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+ # Merge this into the main assertion when the tag keys are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ - name: 'Enable encryption'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kms_cmk_id_1 }}'
+ register: result
+ - name: 'Assert that state is changed when enabling encryption'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Test encryption changed state when updating key (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kms_cmk_id_2 }}'
+ register: result
+ - name: 'Assert state is changed when stream encryption key is changed (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Test encryption changed state when updating key'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kms_cmk_id_2 }}'
+ register: result
+ - name: 'Assert state is changed when stream encryption key is changed'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ # Merge this into the main assertion when the main return keys are
+ # snake_cased
+ - name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'KMS'
+ - result.key_id in kms_cmk_2
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+ # Merge this into the main assertion when the tag keys are no longer
+ # snake_cased
+ - name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+ # ============================================================
+
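+ # The empty per-task module_defaults below overrides the top-level default
+ # so the 'shards' parameter (mandatory only when state=present) isn't
+ # passed when deleting the stream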
+ - name: 'Delete stream (CHECK_MODE)'
+ check_mode: yes
+ module_defaults: { kinesis_stream: {} }
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ state: absent
+ register: result
+ - name: 'Assert state is changed when deleting a stream (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Delete stream'
+ module_defaults: { kinesis_stream: {} }
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ state: absent
+ register: result
+ - name: 'Assert state is changed when deleting a stream'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+ - name: 'Delete stream - Idempotency (CHECK_MODE)'
+ check_mode: yes
+ module_defaults: { kinesis_stream: {} }
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ state: absent
+ register: result
+ - name: 'Assert state is not changed when deleting a stream that was previously deleted (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (CM)
+ ignore_errors: yes
+
+ - name: 'Delete stream - Idempotency'
+ module_defaults: { kinesis_stream: {} }
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ state: absent
+ register: result
+ - name: 'Assert state is not changed when deleting a stream that was previously deleted'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+
+ always:
+ # ============================================================
+ - name: 'Ensure Kinesis stream is gone'
+ ignore_errors: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ state: absent
+
+ - name: 'KMS test cleanup - only run when explicitly enabled'
+ when:
+ - run_kms_tests | default(False) | bool
+ block:
+ - name: 'Delete the KMS keys'
+ ignore_errors: yes
+ aws_kms:
+ state: absent
+ alias: '{{ item }}'
+ loop:
+ - '{{ kms_cmk_alias_1 }}'
+ - '{{ kms_cmk_alias_2 }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/test_encryption.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/test_encryption.yml
new file mode 100644
index 00000000..1ce24369
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/kinesis_stream/tasks/test_encryption.yml
@@ -0,0 +1,262 @@
+---
+# Run through the different ways we can enable/change encryption
+# Enable (check_mode)
+# Enable
+# Idempotency - compared to ID (idempotency)
+# Idempotency - compared to ID
+# Idempotency - compared to Alias (idempotency)
+# Idempotency - compared to Alias
+# Idempotency - compared to ARN (idempotency)
+# Idempotency - compared to ARN
+# Disable (check_mode)
+# Disable
+#
+- name: 'Enable encryption using {{ key_type }} (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key }}'
+ register: result
+- name: 'Assert state is changed when enabling encryption (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: 'Enable encryption using {{ key_type }}'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key }}'
+ register: result
+- name: 'Assert that state is changed when enabling encryption'
+ assert:
+ that:
+ - result is success
+ - result is changed
+# Merge this into the main assertion when the main return keys are
+# snake_cased
+- name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'KMS'
+ - result.key_id in kms_cmk_1
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+# Merge this into the main assertion when the tag keys are no longer
+# snake_cased
+- name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and ID (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_id }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and ID (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and ID'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_id }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and ID'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+# Merge this into the main assertion when the main return keys are
+# snake_cased
+- name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'KMS'
+ - result.key_id in kms_cmk_1
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+# Merge this into the main assertion when the tag keys are no longer
+# snake_cased
+- name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and Alias (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_alias }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and Alias (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and Alias'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_alias }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and Alias'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+# Merge this into the main assertion when the main return keys are
+# snake_cased
+- name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'KMS'
+ - result.key_id in kms_cmk_1
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+# Merge this into the main assertion when the tag keys are no longer
+# snake_cased
+- name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and ARN (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_arn }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and ARN (CHECK_MODE)'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+
+- name: 'Test encryption idempotency comparing {{ key_type }} and ARN'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'enabled'
+ encryption_type: 'KMS'
+ key_id: '{{ kinesis_key_arn }}'
+ register: result
+- name: 'Assert state is not changed when comparing {{ key_type }} and ARN'
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ # XXX BUG (Enc_Idemp)
+ ignore_errors: yes
+# Merge this into the main assertion when the main return keys are
+# snake_cased
+- name: 'Assert expected return values'
+ assert:
+ that:
+ - result.encryption_type == 'KMS'
+ - result.key_id in kms_cmk_1
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+ # XXX BUG (Enc_snake)
+ ignore_errors: yes
+# Merge this into the main assertion when the tag keys are no longer
+# snake_cased
+- name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
+
+- name: 'Disable encryption (CHECK_MODE)'
+ check_mode: yes
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'disabled'
+ # XXX BUG (Enc_Disable)
+ encryption_type: 'KMS'
+ # XXX Oddity of Kinesis - This needs to match the existing setting
+ key_id: '{{ kinesis_key_arn }}'
+ register: result
+- name: 'Assert state is changed when disabling encryption (CHECK_MODE)'
+ # XXX BUG (CM)
+ ignore_errors: yes
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: 'Disable encryption'
+ kinesis_stream:
+ name: '{{ kinesis_stream_name }}'
+ encryption_state: 'disabled'
+ # XXX BUG (Enc_Disable)
+ encryption_type: 'KMS'
+ # XXX Oddity of Kinesis - This needs to match the existing setting
+ key_id: '{{ kinesis_key_arn }}'
+ register: result
+- name: 'Assert state is changed when disabling encryption'
+ assert:
+ that:
+ - result is success
+ - result is changed
+ - result.encryption_type == 'NONE'
+ - result.open_shards_count == 1
+ - result.retention_period_hours == 48
+ - result.stream_arn == kinesis_stream_arn
+ - result.stream_name == kinesis_stream_name
+ - result.stream_status == 'ACTIVE'
+ #- result.tags == kinesis_stream_tags_2
+# Merge this into the main assertion when the tag keys are no longer
+# snake_cased
+- name: 'Assert tags return as expected (tags2)'
+ assert:
+ that:
+ - result.tags == kinesis_stream_tags_2
+ # XXX BUG (Tag_snake)
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/aliases
new file mode 100644
index 00000000..c11244e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+shippable/aws/group2
+execute_lambda
+lambda_info
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/defaults/main.yml
new file mode 100644
index 00000000..ea29794e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for lambda integration test
+lambda_function_name: '{{ resource_prefix }}'
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
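+# e.g. a resource_prefix of 'shippable-123456-123' yields a unique_id of '123456-123'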
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+lambda_role_name: 'ansible-test-{{ unique_id }}-lambda'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/mini_lambda.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/mini_lambda.py
new file mode 100644
index 00000000..901f6b55
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/mini_lambda.py
@@ -0,0 +1,48 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+
+def handler(event, context):
+ """
+ The handler function is the function which gets called each time
+ the lambda is run.
+ """
+ # printing goes to the CloudWatch log, so we can debug the lambda simply
+ # by finding the log entry.
+ print("got event:\n" + json.dumps(event))
+
+ # if the name parameter isn't present this raises a KeyError, which
+ # surfaces as a standard Lambda invocation failure; for these tests
+ # that is completely fine.
+
+ name = event["name"]
+
+ # we can use environment variables as part of the configuration of the lambda
+ # which can change the behaviour of the lambda without needing a new upload
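+ # e.g. the integration tests set EXTRA_MESSAGE to "I think you are great!!"
+ # and then expect the response "hello Mr Ansible Tests. I think you are great!!"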
+
+ extra = os.environ.get("EXTRA_MESSAGE")
+ if extra is not None and len(extra) > 0:
+ greeting = "hello {0}. {1}".format(name, extra)
+ else:
+ greeting = "hello " + name
+
+ return {"message": greeting}
+
+
+def main():
+ """
+ This main function is never called during normal lambda use.
+ It is here only for testing the lambda program locally.
+ """
+ event = {"name": "james"}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json
new file mode 100644
index 00000000..fb84ae9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/tasks/main.yml
new file mode 100644
index 00000000..823f479e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda/tasks/main.yml
@@ -0,0 +1,433 @@
+- name: set connection information for AWS modules and run tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ block:
+ # Preparation
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_lambda.py
+ dest: '{{ output_dir }}/mini_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_lambda.py'
+ dest: '{{ output_dir }}/mini_lambda.zip'
+
+ # Parameter tests
+ - name: test with no parameters
+ lambda:
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: name")'
+
+ - name: test with no parameters except state absent
+ lambda:
+ state: absent
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with state=absent but no name
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: name")'
+
+ - name: test with no role or handler
+ lambda:
+ name: ansible-testing-fake-should-not-be-created
+ runtime: python2.7
+ register: result
+ ignore_errors: true
+ - name: assert failure when called without a handler
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("state is present but all of the following are missing: handler")'
+
+ - name: test state=present with security group but no vpc
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: 'python2.7'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ handler: '{{ omit }}'
+ description: '{{ omit }}'
+ vpc_subnet_ids: '{{ omit }}'
+ vpc_security_group_ids: 'sg-FA6E'
+ environment_variables: '{{ omit }}'
+ dead_letter_arn: '{{ omit }}'
+ register: result
+ ignore_errors: true
+ - name: assert lambda fails with proper message
+ assert:
+ that:
+ - result is failed
+ - result.msg != "MODULE FAILURE"
+ - result.changed == False
+ - '"parameters are required together" in result.msg'
+
+ # Prepare minimal Lambda
+
+ - name: test state=present - upload the lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ register: result
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.configuration.tracing_config.mode == "PassThrough"
+
+ # Test basic operation of the uploaded lambda
+ - name: test lambda works
+ execute_lambda:
+ name: '{{lambda_function_name}}'
+ payload:
+ name: Mr Ansible Tests
+ register: result
+ - name: assert lambda manages to respond as expected
+ assert:
+ that:
+ - result is not failed
+ - result.result.output.message == "hello Mr Ansible Tests"
+
+ # Test updating Lambda
+ - name: test lambda config updates
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs10.x
+ tracing_mode: Active
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == True
+ - update_result.configuration.runtime == 'nodejs10.x'
+ - update_result.configuration.tracing_config.mode == 'Active'
+
+ - name: test no changes are made with the same parameters repeated
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs10.x
+ tracing_mode: Active
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == False
+ - update_result.configuration.runtime == 'nodejs10.x'
+ - update_result.configuration.tracing_config.mode == 'Active'
+
+ - name: reset config updates for the following tests
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: python2.7
+ tracing_mode: PassThrough
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ register: result
+ - name: assert that reset succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.changed == True
+ - result.configuration.runtime == 'python2.7'
+ - result.configuration.tracing_config.mode == 'PassThrough'
+
+ # Query the Lambda
+ - name: lambda_info | Gather all info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: all
+ register: lambda_infos_all
+ - name: lambda_info | Assert successful retrieval of all information
+ assert:
+ that:
+ - lambda_infos_all is not failed
+ - lambda_infos_all.function[lambda_function_name].function_name == lambda_function_name
+ - lambda_infos_all.function[lambda_function_name].runtime == "python2.7"
+ - lambda_infos_all.function[lambda_function_name].versions is defined
+ - lambda_infos_all.function[lambda_function_name].aliases is defined
+ - lambda_infos_all.function[lambda_function_name].policy is defined
+ - lambda_infos_all.function[lambda_function_name].mappings is defined
+ - lambda_infos_all.function[lambda_function_name].description == ""
+ - lambda_infos_all.function[lambda_function_name].function_arn is defined
+ - lambda_infos_all.function[lambda_function_name].handler == "mini_lambda.handler"
+
+ - name: lambda_info | Gather version info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: versions
+ register: lambda_infos_versions
+ - name: lambda_info | Assert successful retrieval of versions information
+ assert:
+ that:
+ - lambda_infos_versions is not failed
+ - lambda_infos_versions.function[lambda_function_name].versions|length > 0
+ - lambda_infos_versions.function[lambda_function_name].function_name is undefined
+
+ - name: lambda_info | Gather config info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: config
+ register: lambda_infos_config
+ - name: lambda_info | Assert successful retrieval of config information
+ assert:
+ that:
+ - lambda_infos_config is not failed
+ - lambda_infos_config.function[lambda_function_name].function_name == lambda_function_name
+ - lambda_infos_config.function[lambda_function_name].description is defined
+ - lambda_infos_config.function[lambda_function_name].versions is undefined
+
+ - name: lambda_info | Gather policy info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: policy
+ register: lambda_infos_policy
+ - name: lambda_info | Assert successful retrieval of policy information
+ assert:
+ that:
+ - lambda_infos_policy is not failed
+ - lambda_infos_policy.function[lambda_function_name].policy is defined
+ - lambda_infos_policy.function[lambda_function_name].versions is undefined
+
+ - name: lambda_info | Gather aliases info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: aliases
+ register: lambda_infos_aliases
+ - name: lambda_info | Assert successful retrieval of aliases information
+ assert:
+ that:
+ - lambda_infos_aliases is not failed
+ - lambda_infos_aliases.function[lambda_function_name].aliases is defined
+
+ - name: lambda_info | Gather mappings info for the given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: mappings
+ register: lambda_infos_mappings
+ - name: lambda_info | Assert successful retrieval of mappings information
+ assert:
+ that:
+ - lambda_infos_mappings is not failed
+ - lambda_infos_mappings.function[lambda_function_name].mappings is defined
+
+ # More Lambda update tests
+ - name: test state=present with all nullable variables explicitly set to null
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: python2.7
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ handler: mini_lambda.handler
+ description: null
+ vpc_subnet_ids: null
+ vpc_security_group_ids: null
+ environment_variables: null
+ dead_letter_arn: null
+ register: result
+ - name: assert lambda remains as before
+ assert:
+ that:
+ - result is not failed
+ - result.changed == False
+
+ - name: test putting an environment variable changes lambda
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ environment_variables:
+ EXTRA_MESSAGE: I think you are great!!
+ register: result
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.changed == True
+ - name: test lambda works
+ execute_lambda:
+ name: '{{lambda_function_name}}'
+ payload:
+ name: Mr Ansible Tests
+ register: result
+ - name: assert lambda manages to respond as expected
+ assert:
+ that:
+ - result is not failed
+ - result.result.output.message == "hello Mr Ansible Tests. I think you are great!!"
+
+ # Deletion behaviour
+ - name: test state=absent (expect changed=True)
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+
+ - name: test state=absent (expect changed=False) when already deleted
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+
+ # Parallel creations and deletions
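+ # The first three lambdas are created asynchronously (async: 1000) while
+ # the fourth runs synchronously; async_status then polls each background
+ # job until it finishes, so the four creations run in parallel.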
+ - name: parallel lambda creation 1/4
+ lambda:
+ name: '{{lambda_function_name}}_1'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_1
+ - name: parallel lambda creation 2/4
+ lambda:
+ name: '{{lambda_function_name}}_2'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_2
+ - name: parallel lambda creation 3/4
+ lambda:
+ name: '{{lambda_function_name}}_3'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_3
+ - name: parallel lambda creation 4/4
+ lambda:
+ name: '{{lambda_function_name}}_4'
+ runtime: python2.7
+ handler: mini_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ register: result
+ - name: assert parallel lambda creation succeeded
+ assert:
+ that:
+ - result is not failed
+ - name: wait for async job 1
+ async_status: jid={{ async_1.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+ - name: wait for async job 2
+ async_status: jid={{ async_2.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+ - name: wait for async job 3
+ async_status: jid={{ async_3.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+ - name: parallel lambda deletion 1/4
+ lambda:
+ name: '{{lambda_function_name}}_1'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_1
+ - name: parallel lambda deletion 2/4
+ lambda:
+ name: '{{lambda_function_name}}_2'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_2
+ - name: parallel lambda deletion 3/4
+ lambda:
+ name: '{{lambda_function_name}}_3'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_3
+ - name: parallel lambda deletion 4/4
+ lambda:
+ name: '{{lambda_function_name}}_4'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ register: result
+ - name: assert parallel lambda deletion succeeded
+ assert:
+ that:
+ - result is not failed
+ - name: wait for async job 1
+ async_status: jid={{ async_1.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+ - name: wait for async job 2
+ async_status: jid={{ async_2.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+ - name: wait for async job 3
+ async_status: jid={{ async_3.ansible_job_id }}
+ register: job_result
+ until: job_result is finished
+ retries: 30
+
+ always:
+ - name: ensure function is absent at end of test
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ ignore_errors: true
+ - name: ensure role has been removed at end of test
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/defaults/main.yml
new file mode 100644
index 00000000..5bcb2027
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for lambda_policy integration test
+lambda_function_name: '{{ resource_prefix }}-api-endpoint'
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
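+# e.g. a resource_prefix of 'shippable-123456-123' yields a unique_id of '123456-123'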
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+lambda_role_name: 'ansible-test-{{ unique_id }}-lambda-policy'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py
new file mode 100644
index 00000000..caccac90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py
@@ -0,0 +1,40 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def handler(event, context):
+ """
+ The handler function is the function which gets called each time
+ the lambda is run.
+ """
+ # printing goes to the CloudWatch log, so we can debug the lambda simply
+ # by finding the log entry.
+ print("got event:\n" + json.dumps(event))
+
+ # if the name parameter isn't present this raises a KeyError, which
+ # surfaces as a standard Lambda invocation failure; for these tests
+ # that is completely fine.
+
+ name = event["pathParameters"]["greet_name"]
+
+ return {"statusCode": 200,
+ "body": 'hello: "' + name + '"',
+ "headers": {}}
+
+
+def main():
+ """
+ This main function is never called during normal lambda use.
+ It is here only for testing the lambda program locally.
+ """
+ event = {"name": "james"}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json
new file mode 100644
index 00000000..fb84ae9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/tasks/main.yml
new file mode 100644
index 00000000..855e9fba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/tasks/main.yml
@@ -0,0 +1,176 @@
+- name: Integration testing for lambda_policy
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ - amazon.aws
+
+ block:
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+
+ - name: test with no parameters
+ module_defaults: { group/aws: {} }
+ lambda_policy: null
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: ")'
+ - name: test with all required dummy parameters but no region
+ module_defaults: { group/aws: {} }
+ lambda_policy:
+ statement_id: dummy
+ principal: api_fakeway
+ action: fake:do_something_fake
+ function_name: dummy_fake_function
+ ignore_errors: true
+ register: result
+ - name: assert failure and appropriate message when called without region
+ assert:
+ that:
+ - result.failed
+ - '"requires a region and none was found" in result.msg'
+ - name: test exceptions generated by forcing bad ec2 url
+ module_defaults: { group/aws: {} }
+ lambda_policy:
+ function_name: '{{ lambda_function_name }}'
+ state: present
+ statement_id: api-gateway-invoke-lambdas
+ action: lambda:InvokeFunction
+ principal: apigateway.amazonaws.com
+ source_arn: arn:aws:execute-api:no-north-0:1234567:*/*
+ ec2_url: https://noexist.example.com
+ ec2_region: no-north-0
+ ec2_access_key: iamnotreallyanaccesskey
+ ec2_secret_key: thisisabadsecretkey
+ security_token: andthisisabadsecuritytoken
+ register: result
+ ignore_errors: true
+ - name: assert lambda_policy fails with a clean error message
+ assert:
+ that:
+ - result is failed
+ - result.msg != "MODULE FAILURE"
+ - result.changed == False
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_http_lambda.py
+ dest: '{{ output_dir }}/mini_http_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_http_lambda.py'
+ dest: '{{ output_dir }}/mini_http_lambda.zip'
+ - name: create minimal lambda role
+ iam_role:
+ name: ansible_lambda_role
+ assume_role_policy_document: '{{ lookup(''file'', ''minimal_trust_policy.json'', convert_data=False) }}'
+ create_instance_profile: false
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+ - name: test state=present - upload the lambda
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: python2.7
+ handler: mini_http_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ register: lambda_result
+ - name: get the aws account ID for use in future commands
+ aws_caller_info: {}
+ register: aws_caller_info
+ - name: register lambda uri for use in template
+ set_fact:
+ mini_lambda_uri: arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ lambda_result.configuration.function_name }}/invocations
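+ # The URI above follows the API Gateway Lambda integration format:
+ # arn:aws:apigateway:<region>:lambda:path/2015-03-31/functions/<function arn>/invocations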
+ - name: build API file
+ template:
+ src: endpoint-test-swagger-api.yml.j2
+ dest: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ - name: deploy new API
+ aws_api_gateway:
+ api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ stage: lambdabased
+ register: create_result
+ - name: register api id for later
+ set_fact:
+ api_id: '{{ create_result.api_id }}'
+ - name: check API fails with permissions failure
+ uri:
+ url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester
+ register: unauth_uri_result
+ ignore_errors: true
+ - name: assert internal server error due to permissions
+ assert:
+ that:
+ - unauth_uri_result is failed
+ - unauth_uri_result.status == 500
+ - name: give api gateway execute permissions on lambda
+ lambda_policy:
+ function_name: '{{ lambda_function_name }}'
+ state: present
+ statement_id: api-gateway-invoke-lambdas
+ action: lambda:InvokeFunction
+ principal: apigateway.amazonaws.com
+ source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*
+ - name: try again but with ARN
+ lambda_policy:
+ function_name: '{{ lambda_result.configuration.function_arn }}'
+ state: present
+ statement_id: api-gateway-invoke-lambdas
+ action: lambda:InvokeFunction
+ principal: apigateway.amazonaws.com
+ source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*
+ - name: check API works with execute permissions
+ uri:
+ url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester
+ register: uri_result
+ - name: assert API call succeeded
+ assert:
+ that:
+ - uri_result is successful
+ - name: deploy the API a second time
+ aws_api_gateway:
+ api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ stage: lambdabased
+ register: create_result
+ ignore_errors: true
+ always:
+ - name: destroy lambda for test cleanup if created
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+ ignore_errors: true
+ - name: destroy API for test cleanup if created
+ aws_api_gateway:
+ state: absent
+ api_id: '{{api_id}}'
+ register: destroy_result
+ ignore_errors: true
+ - name: Clean up test role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
new file mode 100644
index 00000000..d6218847
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
@@ -0,0 +1,39 @@
+---
+swagger: "2.0"
+info:
+ version: "2017-05-11T12:14:59Z"
+ title: "{{resource_prefix}}LambdaBased_API"
+host: "fakeexample.execute-api.us-east-1.amazonaws.com"
+basePath: "/lambdabased"
+schemes:
+- "https"
+paths:
+ /mini/{greet_name}:
+ get:
+ produces:
+ - "application/json"
+ parameters:
+ - name: "greet_name"
+ in: "path"
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "200 response"
+ schema:
+ $ref: "#/definitions/Empty"
+ x-amazon-apigateway-integration:
+ responses:
+ default:
+ statusCode: "200"
+ uri: "{{mini_lambda_uri}}"
+ requestTemplates:
+ application/json: "{\"statusCode\": 200}"
+ passthroughBehavior: "when_no_match"
+ httpMethod: "POST"
+ contentHandling: "CONVERT_TO_TEXT"
+ type: "aws_proxy"
+definitions:
+ Empty:
+ type: "object"
+ title: "Empty Schema"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/defaults/main.yml
new file mode 100644
index 00000000..46f5b34e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/defaults/main.yml
@@ -0,0 +1,2 @@
+instance_name: "{{ resource_prefix }}_instance"
+zone: "{{ aws_region }}a"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml
new file mode 100644
index 00000000..91f13a8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/lightsail/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+
+ # ==== Tests ===================================================
+
+ - name: Create a new instance
+ lightsail:
+ name: "{{ instance_name }}"
+ zone: "{{ zone }}"
+ blueprint_id: amazon_linux
+ bundle_id: nano_2_0
+ wait: yes
+ register: result
+
+ - assert:
+ that:
+ - result.changed == True
+ - "'instance' in result and result.instance.name == instance_name"
+ - "result.instance.state.name == 'running'"
+
+ - name: Make sure create is idempotent
+ lightsail:
+ name: "{{ instance_name }}"
+ zone: "{{ zone }}"
+ blueprint_id: amazon_linux
+ bundle_id: nano_2_0
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ - name: Start the running instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: running
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ - name: Stop the instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: stopped
+ wait: yes
+ register: result
+
+ - assert:
+ that:
+ - result.changed == True
+ - "result.instance.state.name == 'stopped'"
+
+ - name: Stop the stopped instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: stopped
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ - name: Start the instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: running
+ register: result
+
+ - assert:
+ that:
+ - result.changed == True
+ - "result.instance.state.name == 'running'"
+
+ - name: Restart the instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: restarted
+ register: result
+
+ - assert:
+ that:
+ - result.changed == True
+
+ - name: Delete the instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed == True
+
+ - name: Make sure instance deletion is idempotent
+ lightsail:
+ name: "{{ instance_name }}"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ # ==== Cleanup ====================================================
+
+ always:
+
+ - name: Cleanup - delete instance
+ lightsail:
+ name: "{{ instance_name }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/prepare_tests/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/prepare_tests/tasks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/prepare_tests/tasks/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/defaults/main.yml
new file mode 100644
index 00000000..a2d215ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/defaults/main.yml
@@ -0,0 +1,23 @@
+---
+instance_id: "{{ resource_prefix }}"
+modified_instance_id: "{{ resource_prefix }}-updated"
+username: test
+password: test12345678
+db_instance_class: db.t2.micro
+storage_encrypted_db_instance_class: db.t2.small
+modified_db_instance_class: db.t2.medium
+allocated_storage: 20
+modified_allocated_storage: 30
+
+# For aurora tests
+cluster_id: "{{ resource_prefix }}-cluster"
+aurora_db_instance_class: db.t2.medium
+
+# For oracle tests
+oracle_ee_db_instance_class: db.r3.xlarge
+processor_features:
+ coreCount: 1
+ threadsPerCore: 1
+modified_processor_features:
+ coreCount: 2
+ threadsPerCore: 2
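+# coreCount and threadsPerCore are the CPU options RDS exposes for instance
+# classes such as the oracle-ee one exercised in test_processor_features.yml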
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/credential_tests.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/credential_tests.yml
new file mode 100644
index 00000000..1aa1c3a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/credential_tests.yml
@@ -0,0 +1,36 @@
+---
+- name: test without credentials
+ rds_instance:
+ db_instance_identifier: test-rds-instance
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed
+ - 'result.msg == "The rds_instance module requires a region and none was found in configuration, environment variables or module parameters"'
+
+- name: test without credentials but with a region
+ rds_instance:
+ db_instance_identifier: test-rds-instance
+ region: us-east-1
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed
+ - '"Unable to locate credentials" in result.msg'
+
+- name: test with invalid credentials
+ rds_instance:
+ db_instance_identifier: test-rds-instance
+ region: us-east-1
+ profile: doesnotexist
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed
+ - 'result.msg == "The config profile (doesnotexist) could not be found"'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/main.yml
new file mode 100644
index 00000000..bb368c47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+- block:
+
+ - include: ./credential_tests.yml
+ tags: credentials
+ - include: ./test_states.yml
+ tags: states
+ - include: ./test_tags.yml
+ tags: tags
+ - include: ./test_modification.yml # TODO: test availability_zone and multi_az
+ tags: modification
+ - include: ./test_bad_options.yml
+ tags: bad_options
+ - include: ./test_processor_features.yml
+ tags: processor_features
+ - include: ./test_encryption.yml
+ tags: encryption
+ - include: ./test_final_snapshot.yml
+ tags: final_snapshot
+ - include: ./test_read_replica.yml
+ tags: read_replica
+ - include: ./test_vpc_security_groups.yml
+ tags: vpc_security_groups
+ - include: ./test_restore_instance.yml # TODO: snapshot, s3
+ tags: restore
+ - include: ./test_snapshot.yml
+ tags: snapshot
+ # TODO: uncomment after adding rds_cluster module
+ #- include: ./test_aurora.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_aurora.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_aurora.yml
new file mode 100644
index 00000000..14d28b24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_aurora.yml
@@ -0,0 +1,144 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group
+ rds_cluster:
+ state: present
+ engine: aurora
+ cluster_id: "{{ cluster_id }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ <<: *aws_connection_info
+
+ - name: Create an Aurora instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ cluster_id: "{{ cluster_id }}"
+ engine: aurora
+ state: present
+ db_instance_class: "{{ aurora_db_instance_class }}"
+ tags:
+ CreatedBy: rds_instance integration tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+ - "result.tags | length == 1"
+
+ - name: Modify tags
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ tags:
+ Test: rds_instance
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 1
+ - "result.tags.Test == 'rds_instance'"
+
+ - name: Test idempotence
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Attempt to modify password (a cluster-managed attribute)
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ password: "{{ password }}"
+ force_update_password: True
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed
+ - "'Modify master user password for the DB Cluster using the ModifyDbCluster API' in result.msg"
+ - "'Please see rds_cluster' in result.msg"
+
+ - name: Modify Aurora instance port (a cluster-managed attribute)
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ port: 1150
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - not result.changed
+ - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster API' in result.msg"
+ - "'Please see rds_cluster' in result.msg"
+
+ - name: Modify Aurora instance identifier
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ purge_tags: False
+ new_id: "{{ modified_instance_id }}"
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ modified_instance_id }}'"
+
+ always:
+
+ - name: Delete the instance
+ rds_instance:
+ id: "{{ item }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ loop:
+ - "{{ instance_id }}"
+ - "{{ modified_instance_id }}"
+ ignore_errors: yes
+
+ - name: Delete the cluster
+ rds_cluster:
+ cluster_id: "{{ cluster_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_bad_options.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_bad_options.yml
new file mode 100644
index 00000000..21de862d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_bad_options.yml
@@ -0,0 +1,41 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a DB instance with an invalid engine
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: thisisnotavalidengine
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: True
+
+ - assert:
+ that:
+ - result.failed
+ - '"DB engine thisisnotavalidengine should be one of" in result.msg'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_encryption.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_encryption.yml
new file mode 100644
index 00000000..dc9a8d96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_encryption.yml
@@ -0,0 +1,53 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ storage_encrypted_db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ storage_encrypted: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+ - result.kms_key_id
+ - result.storage_encrypted == true
+
+ always:
+
+ - name: Delete DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_final_snapshot.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_final_snapshot.yml
new file mode 100644
index 00000000..bbada420
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_final_snapshot.yml
@@ -0,0 +1,75 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+
+ - name: Delete the DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ final_snapshot_identifier: "{{ instance_id }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.final_snapshot.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Check that snapshot exists
+ rds_snapshot_info:
+ db_snapshot_identifier: "{{ instance_id }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - "result.snapshots | length == 1"
+ - "result.snapshots.0.engine == 'mariadb'"
+
+ always:
+ - name: Remove the snapshot
+ rds_snapshot:
+ db_snapshot_identifier: "{{ instance_id }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Remove the DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_modification.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_modification.yml
new file mode 100644
index 00000000..ff874447
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_modification.yml
@@ -0,0 +1,195 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ engine_version: "10.1.26"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Modify the instance name without immediate application
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ new_id: "{{ modified_instance_id }}"
+ apply_immediately: False
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.db_instance_identifier == "{{ instance_id }}"'
+
+ - name: Immediately apply the pending update
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ new_id: "{{ modified_instance_id }}"
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.db_instance_identifier == "{{ modified_instance_id }}"'
+
+ - name: Modify the instance immediately
+ rds_instance:
+ id: '{{ modified_instance_id }}'
+ state: present
+ new_id: '{{ instance_id }}'
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.db_instance_identifier == "{{ instance_id }}"'
+
+ - name: Check mode - modify the password
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ password: '{{ password }}'
+ force_update_password: True
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify the password
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ password: '{{ password }}'
+ force_update_password: True
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name,
+ # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration
+
+ - name: Modify several attributes
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: 30
+ db_instance_class: "{{ modified_db_instance_class }}"
+ backup_retention_period: 2
+ preferred_backup_window: "05:00-06:00"
+ preferred_maintenance_window: "mon:06:20-mon:06:50"
+ engine_version: "10.2.21"
+ allow_major_version_upgrade: true
+ auto_minor_version_upgrade: false
+ port: 1150
+ max_allocated_storage: 100
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30'
+ - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100'
+ - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
+ - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == "db.t2.medium"'
+ - '"engine_version" in result.pending_modified_values or result.engine_version == "10.2.21"'
+
+ - name: Idempotence modifying several pending attributes
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: 30
+ db_instance_class: "{{ modified_db_instance_class }}"
+ backup_retention_period: 2
+ preferred_backup_window: "05:00-06:00"
+ preferred_maintenance_window: "mon:06:20-mon:06:50"
+ engine_version: "10.2.21"
+ allow_major_version_upgrade: true
+ auto_minor_version_upgrade: false
+ port: 1150
+ max_allocated_storage: 100
+ <<: *aws_connection_info
+ register: result
+ retries: 30
+ delay: 10
+ until: result is not failed
+
+ - assert:
+ that:
+ - not result.changed
+ - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30'
+ - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100'
+ - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
+ - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == "db.t2.medium"'
+ - '"engine_version" in result.pending_modified_values or result.engine_version == "10.2.21"'
+
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - '"pending_modified_values" not in result'
+
+ always:
+
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ item }}'
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ loop: ['{{ instance_id }}', '{{ modified_instance_id }}']
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_processor_features.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_processor_features.yml
new file mode 100644
index 00000000..2fb3d895
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_processor_features.yml
@@ -0,0 +1,126 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create an oracle-ee DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: oracle-ee
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ oracle_ee_db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ storage_encrypted: True
+ processor_features: "{{ processor_features }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.processor_features.coreCount == "{{ processor_features.coreCount }}"'
+ - 'result.processor_features.threadsPerCore == "{{ processor_features.threadsPerCore }}"'
+
+ - name: Check mode - modify the processor features
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: oracle-ee
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ oracle_ee_db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ storage_encrypted: True
+ processor_features: "{{ modified_processor_features }}"
+ apply_immediately: true
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify the processor features
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: oracle-ee
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ oracle_ee_db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ storage_encrypted: True
+ processor_features: "{{ modified_processor_features }}"
+ apply_immediately: true
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount }}"'
+ - 'result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}"'
+
+ - name: Check mode - use the default processor features
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ processor_features: {}
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+ check_mode: True
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Use the default processor features
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ processor_features: {}
+ apply_immediately: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.pending_modified_values.processor_features.coreCount == "DEFAULT"'
+ - 'result.pending_modified_values.processor_features.threadsPerCore == "DEFAULT"'
+
+ always:
+
+ - name: Delete the DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_read_replica.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_read_replica.yml
new file mode 100644
index 00000000..3ed091b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_read_replica.yml
@@ -0,0 +1,162 @@
+---
+ - block:
+
+ - name: set the two regions for the source DB and the replica
+ set_fact:
+ region_src: "{{ aws_region }}"
+ region_dest: "{{ aws_region }}"
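+ # Both facts point at the same region here, so the replica is created
+ # in-region; set region_dest to a second region to exercise true
+ # cross-region replication.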
+
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ region: "{{ region_src }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a source DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mysql
+ backup_retention_period: 1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ region: "{{ region_src }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: source_db
+
+ - assert:
+ that:
+ - source_db.changed
+ - "source_db.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Create a read replica in the destination region
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: present
+ source_db_instance_identifier: "{{ instance_id }}"
+ engine: mysql
+ username: "{{ username }}"
+ password: "{{ password }}"
+ read_replica: True
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ region: "{{ region_dest }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}-replica'"
+ - "result.tags | length == 2"
+ - "result.tags.Name == '{{ instance_id }}'"
+ - "result.tags.Created_by == 'Ansible rds_instance tests'"
+
+ - name: Test idempotence with a read replica
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: present
+ source_db_instance_identifier: "{{ instance_id }}"
+ engine: mysql
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ region: "{{ region_dest }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotence with read_replica=True
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: present
+ read_replica: True
+ source_db_instance_identifier: "{{ instance_id }}"
+ engine: mysql
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ region: "{{ region_dest }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Promote the read replica
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: present
+ read_replica: False
+ region: "{{ region_dest }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Test idempotence
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: present
+ read_replica: False
+ region: "{{ region_dest }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+
+ - name: Remove the DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ region: "{{ region_src }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Remove the DB replica
+ rds_instance:
+ id: "{{ instance_id }}-replica"
+ state: absent
+ skip_final_snapshot: True
+ region: "{{ region_dest }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_restore_instance.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_restore_instance.yml
new file mode 100644
index 00000000..b40e487d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_restore_instance.yml
@@ -0,0 +1,95 @@
+---
+ - block:
+
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a source DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mysql
+ backup_retention_period: 1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: source_db
+
+ - assert:
+ that:
+ - source_db.changed
+ - "source_db.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Create a point in time DB instance
+ rds_instance:
+ id: "{{ instance_id }}-point-in-time"
+ state: present
+ source_db_instance_identifier: "{{ instance_id }}"
+ creation_source: instance
+ engine: mysql
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ use_latest_restorable_time: True
+ <<: *aws_connection_info
+ register: result
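+ # use_latest_restorable_time restores the new instance to the most recent
+ # restorable point; the latest_restorable_time returned here is passed back
+ # in as restore_time below to prove idempotence.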
+
+ - name: Test idempotence with a point in time replica
+ rds_instance:
+ id: "{{ instance_id }}-point-in-time"
+ state: present
+ source_db_instance_identifier: "{{ instance_id }}"
+ creation_source: instance
+ engine: mysql
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ restore_time: "{{ result.latest_restorable_time }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+
+ - name: Remove the DB instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Remove the point in time restored DB
+ rds_instance:
+ id: "{{ instance_id }}-point-in-time"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_snapshot.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_snapshot.yml
new file mode 100644
index 00000000..7e88db43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_snapshot.yml
@@ -0,0 +1,85 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Getting shared snapshots
+ rds_snapshot_info:
+ snapshot_type: "shared"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.cluster_snapshots is defined
+ - result.snapshots is defined
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+ - "result.tags | length == 2"
+ - "result.tags.Name == '{{ instance_id }}'"
+ - "result.tags.Created_by == 'Ansible rds_instance tests'"
+
+ - name: Getting public snapshots
+ rds_snapshot_info:
+ db_instance_identifier: "{{ instance_id }}"
+ snapshot_type: "public"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.cluster_snapshots is not defined
+ - result.snapshots is defined
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ # TODO: ideally we would test with an actual shared snapshot, but that would require a second AWS account, which makes the tests considerably more complicated.
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_states.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_states.yml
new file mode 100644
index 00000000..f55ffe70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_states.yml
@@ -0,0 +1,277 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Check Mode - Create a mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Idempotence
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier
+
+ - name: Idempotence with minimal options
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier
+
+ - name: Check Mode - stop the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ <<: *aws_connection_info
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Stop the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Check Mode - idempotence
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ <<: *aws_connection_info
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Idempotence
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Check mode - reboot a stopped instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: rebooted
+ <<: *aws_connection_info
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Reboot a stopped instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: rebooted
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Check Mode - start the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ <<: *aws_connection_info
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Stop the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ <<: *aws_connection_info
+
+ - name: Start the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: take a snapshot
+ rds_snapshot:
+ db_instance_identifier: '{{ instance_id }}'
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: present
+ wait: yes
+ <<: *aws_connection_info
+
+ - name: take a snapshot - idempotence
+ rds_snapshot:
+ db_instance_identifier: '{{ instance_id }}'
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: present
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: check snapshot is ok
+ rds_snapshot_info:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - (result.snapshots | length) == 1
+
+ - name: remove a snapshot without wait
+ rds_snapshot:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: remove a snapshot without wait - idempotence
+ rds_snapshot:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: absent
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: remove a snapshot with wait - idempotence
+ rds_snapshot:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: check snapshot is removed
+ rds_snapshot_info:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.snapshots
+
+ always:
+
+ - name: remove snapshot
+ rds_snapshot:
+ db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Remove DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_tags.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_tags.yml
new file mode 100644
index 00000000..f5003ad7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_tags.yml
@@ -0,0 +1,265 @@
+---
+ - block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ tags:
+ Name: "{{ instance_id }}"
+ Created_by: Ansible rds_instance tests
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+ - "result.tags | length == 2"
+ - "result.tags.Name == '{{ instance_id }}'"
+ - "result.tags.Created_by == 'Ansible rds_instance tests'"
+
+ - name: Test idempotence omitting tags
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - "result.tags | length == 2"
+
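+ # purge_tags defaults to true for rds_instance (the 'Remove all tags' task
+ # below relies on that); with purge_tags set to false, tags omitted from the
+ # task are left in place.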
+ - name: Test tags are not purged if purge_tags is False
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ tags: {}
+ purge_tags: False
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - "result.tags | length == 2"
+
+ - name: Add a tag and remove a tag
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ tags:
+ Name: "{{ instance_id }}-new"
+ Created_by: Ansible rds_instance tests
+ purge_tags: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.tags | length == 2"
+ - "result.tags.Name == '{{ instance_id }}-new'"
+
+ - name: Remove all tags
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ tags: {}
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - not result.tags
+
+ - name: snapshot instance without tags
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ wait: yes
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - not result.tags
+
+ - name: add tags to snapshot
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ two: world
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 2
+
+ - name: add tags to snapshot - idempotence
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ two: world
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+
+ - name: add tag to snapshot using purge_tags False
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ three: another
+ purge_tags: False
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 3
+
+ - name: rerun tags but not setting purge_tags
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ three: another
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 2
+
+ - name: rerun tags but not setting purge_tags - idempotence
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ three: another
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+
+ - name: remove snapshot
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: absent
+ wait: yes
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: create snapshot with tags
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: present
+ tags:
+ one: hello
+ three: another
+ purge_tags: yes
+ wait: yes
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 2
+
+ always:
+
+ - name: tidy up snapshot
+ rds_snapshot:
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ db_instance_identifier: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml
new file mode 100644
index 00000000..53c7e9d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml
@@ -0,0 +1,169 @@
+---
+ - name: rds_instance test_vpc_security_groups tests
+ collections:
+ - amazon.aws
+ block:
+ - name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "10.122.122.128/26"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "created by rds_instance integration tests"
+ <<: *aws_connection_info
+ register: vpc_result
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}-subnet"
+ Description: "created by rds_instance integration tests"
+ state: present
+ <<: *aws_connection_info
+ register: subnets_result
+ loop:
+ - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"}
+ - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"}
+ - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"}
+ - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"}
+
+ - name: Create security groups
+ ec2_group:
+ name: "{{ item }}"
+ description: "created by rds_instance integration tests"
+ state: present
+ <<: *aws_connection_info
+ register: sgs_result
+ loop:
+ - "{{ resource_prefix }}-sg-1"
+ - "{{ resource_prefix }}-sg-2"
+ - "{{ resource_prefix }}-sg-3"
+
+ - debug: var=sgs_result
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a DB instance in the VPC with two security groups
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: mariadb
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ vpc_security_group_ids:
+ - "{{ sgs_result.results.0.group_id }}"
+ - "{{ sgs_result.results.1.group_id }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - "result.db_instance_identifier == '{{ instance_id }}'"
+
+ - name: Add a new security group
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ vpc_security_group_ids:
+ - "{{ sgs_result.results.2.group_id }}"
+ <<: *aws_connection_info
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ always:
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: yes
+
+ - name: Remove security groups
+ ec2_group:
+ name: "{{ item }}"
+ description: "created by rds_instance integration tests"
+ state: absent
+ <<: *aws_connection_info
+ register: sgs_result
+ loop:
+ - "{{ resource_prefix }}-sg-1"
+ - "{{ resource_prefix }}-sg-2"
+ - "{{ resource_prefix }}-sg-3"
+
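+ # Teardown uses retries/until because subnet and VPC deletion can fail while
+ # resources that the RDS instance created are still detaching.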
+ - name: remove subnets
+ ec2_vpc_subnet:
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}-subnet"
+ Description: "created by rds_instance integration tests"
+ state: absent
+ <<: *aws_connection_info
+ register: subnets
+ ignore_errors: yes
+ retries: 30
+ until: subnets is not failed
+ delay: 10
+ loop:
+ - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"}
+ - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"}
+ - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"}
+ - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"}
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "10.122.122.128/26"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "created by rds_instance integration tests"
+ <<: *aws_connection_info
+ register: vpc_result
+ ignore_errors: yes
+ retries: 30
+ until: vpc_result is not failed
+ delay: 10
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+ <<: *aws_connection_info
+ register: result
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/defaults/main.yml
new file mode 100644
index 00000000..8f9de71f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/defaults/main.yml
@@ -0,0 +1,30 @@
+---
+rds_param_group:
+ name: "{{ resource_prefix }}rds-param-group"
+ description: "Test group for rds_param_group Ansible module"
+ engine: postgres9.6
+
+rds_long_param_list:
+ application_name: Test
+ logging_collector: on
+ log_directory: /var/log/postgresql
+ log_filename: postgresql.log.%Y-%m-%d-%H
+ log_file_mode: 0600
+ event_source: RDS
+ log_min_messages: INFO
+ log_min_duration_statement: 500
+ log_rotation_age: 60
+ debug_print_parse: on
+ debug_print_rewritten: on
+ debug_print_plan: on
+ debug_pretty_print: on
+ log_checkpoints: on
+ log_connections: on
+ log_disconnections: on
+ log_duration: on
+ log_error_verbosity: VERBOSE
+ log_lock_waits: on
+ log_temp_files: 10K
+ log_timezone: UTC
+ log_statement: 'all'
+ log_replication_commands: on
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/tasks/main.yml
new file mode 100644
index 00000000..9af2776b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_param_group/tasks/main.yml
@@ -0,0 +1,321 @@
+---
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
+# TODO - name: test 'region' parameter
+# TODO - name: test 'state=absent' parameter for existing key
+# TODO - name: test 'state=absent' parameter for missing key
+# TODO - name: test 'validate_certs' parameter
+
+# ============================================================
+# - include: ../../setup_ec2/tasks/common.yml module_name=rds_param_group
+
+- block:
+
+ # ============================================================
+ - name: test empty parameter group
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert rds parameter group changed
+ assert:
+ that:
+ - 'result.changed'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+ - 'result.tags == {}'
+
+ # ============================================================
+ - name: test empty parameter group with no arguments changes nothing
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert no change when running empty parameter group a second time
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test adding numeric tag
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ tags:
+ Environment: test
+ Test: 123
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: adding numeric tag just silently converts
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags.Test == "123"'
+
+ # ============================================================
+ - name: test tagging existing group
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ tags:
+ Environment: test
+ Test: "123"
+ NewTag: "hello"
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert tagging existing group changes it and adds tags
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags.NewTag == "hello"'
+
+ # ============================================================
+ - name: test repeating tagging existing group
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ tags:
+ Environment: test
+ Test: "123"
+ NewTag: "hello"
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert repeating tagging existing group does not change it
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags.Test == "123"'
+
+ # ============================================================
+ - name: test deleting tags from existing group
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ tags:
+ Environment: test
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ purge_tags: yes
+ register: result
+ ignore_errors: true
+
+ - name: assert removing tags from existing group changes it
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags.Environment == "test"'
+ - '"NewTag" not in result.tags'
+
+ # ============================================================
+ - name: test state=absent with engine defined (expect changed=true)
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ state: absent
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert state=absent with engine defined (expect changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test creating group with parameters
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: 'all'
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: "123"
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert creating a new group with parameter changes it
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags.Test == "123"'
+ - 'result.errors|length == 2'
+
+ # ============================================================
+ - name: test repeating group with parameters
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: 'all'
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: "123"
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert repeating group with parameters does not change it
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags.Test == "123"'
+ - 'result.errors|length == 2'
+
+ # ============================================================
+ - name: test state=absent with engine defined (expect changed=true)
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ state: absent
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert state=absent with engine defined (expect changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test repeating state=absent (expect changed=false)
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ state: absent
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert repeating state=absent (expect changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test creating group with more than 20 parameters
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ params: "{{ rds_long_param_list }}"
+ state: present
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert creating a new group with many parameters changes it
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test repeating group with more than 20 parameters
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ engine: "{{ rds_param_group.engine }}"
+ description: "{{ rds_param_group.description }}"
+ params: "{{ rds_long_param_list }}"
+ region: "{{ ec2_region }}"
+ state: present
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert repeating a group with many parameters does not change it
+ assert:
+ that:
+ - 'not result.changed'
+
+ always:
+ # ============================================================
+ - name: test state=absent (expect changed=true)
+ rds_param_group:
+ name: "{{ rds_param_group.name }}"
+ state: absent
+ region: "{{ ec2_region }}"
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert state=absent (expect changed=true)
+ assert:
+ that:
+ - 'result.changed'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
new file mode 100644
index 00000000..07e0fe93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
@@ -0,0 +1,8 @@
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_a: '10.{{ 256 | random(seed=resource_prefix) }}.10.0/24'
+subnet_b: '10.{{ 256 | random(seed=resource_prefix) }}.11.0/24'
+subnet_c: '10.{{ 256 | random(seed=resource_prefix) }}.12.0/24'
+subnet_d: '10.{{ 256 | random(seed=resource_prefix) }}.13.0/24'
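+# random(seed=resource_prefix) is deterministic for a given test run, so each
+# run gets a stable second octet and concurrent runs are unlikely to choose
+# overlapping CIDR ranges.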
+
+group_description: 'Created by integration test : {{ resource_prefix }}'
+group_description_changed: 'Created by integration test : {{ resource_prefix }} - changed'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
new file mode 100644
index 00000000..9d91be17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+- prepare_tests
+- setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
new file mode 100644
index 00000000..664d78ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
@@ -0,0 +1,115 @@
+---
+# Tests for rds_subnet_group
+#
+# Note: (From Amazon's documentation)
+# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.modify_db_subnet_group
+# DB subnet groups must contain at least one subnet in at least two AZs in the
+# AWS Region.
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - amazon.aws
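+ # module_defaults under group/aws injects these connection parameters into
+ # every AWS module in the block, replacing the per-task aws_connection_info
+ # anchor pattern used by the older tests.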
+ block:
+
+ # ============================================================
+
+ - name: 'Fetch AZ availability'
+ aws_az_info:
+ register: az_info
+
+ - name: 'Assert that we have multiple AZs available to us'
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: 'Pick AZs'
+ set_fact:
+ az_one: '{{ az_info.availability_zones[0].zone_name }}'
+ az_two: '{{ az_info.availability_zones[1].zone_name }}'
+
+ # ============================================================
+
+ - name: 'Create a VPC'
+ ec2_vpc_net:
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}'
+ register: vpc
+
+ - name: 'Create subnets'
+ ec2_vpc_subnet:
+ state: present
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.az }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: '{{ item.name }}'
+ with_items:
+ - cidr: '{{ subnet_a }}'
+ az: '{{ az_one }}'
+ name: '{{ resource_prefix }}-subnet-a'
+ - cidr: '{{ subnet_b }}'
+ az: '{{ az_two }}'
+ name: '{{ resource_prefix }}-subnet-b'
+ - cidr: '{{ subnet_c }}'
+ az: '{{ az_one }}'
+ name: '{{ resource_prefix }}-subnet-c'
+ - cidr: '{{ subnet_d }}'
+ az: '{{ az_two }}'
+ name: '{{ resource_prefix }}-subnet-d'
+ register: subnets
+
+ - set_fact:
+ subnet_ids: '{{ subnets | community.general.json_query("results[].subnet.id") | list }}'
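+ # The community.general.json_query filter applies a JMESPath expression to
+ # pull the subnet IDs out of the registered loop results.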
+
+ # ============================================================
+
+ - include_tasks: 'params.yml'
+
+ - include_tasks: 'tests.yml'
+
+ # ============================================================
+
+ always:
+ - name: 'Remove subnet group'
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ ignore_errors: yes
+
+ - name: 'Remove subnets'
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ item.cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ with_items:
+ - cidr: '{{ subnet_a }}'
+ name: '{{ resource_prefix }}-subnet-a'
+ - cidr: '{{ subnet_b }}'
+ name: '{{ resource_prefix }}-subnet-b'
+ - cidr: '{{ subnet_c }}'
+ name: '{{ resource_prefix }}-subnet-c'
+ - cidr: '{{ subnet_d }}'
+ name: '{{ resource_prefix }}-subnet-d'
+ ignore_errors: yes
+ register: removed_subnets
+ until: removed_subnets is succeeded
+ retries: 5
+ delay: 5
+
+ - name: 'Remove the VPC'
+ ec2_vpc_net:
+ state: absent
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}'
+ ignore_errors: yes
+ register: removed_vpc
+ until: removed_vpc is success
+ retries: 5
+ delay: 5
+
+ # ============================================================
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
new file mode 100644
index 00000000..e6b042f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
@@ -0,0 +1,30 @@
+---
+# Try creating without a description
+- name: "Create a subnet group (no description)"
+ rds_subnet_group:
+ state: present
+ name: "{{ resource_prefix }}"
+ subnets:
+ - "{{ subnet_ids[0] }}"
+ - "{{ subnet_ids[1] }}"
+ ignore_errors: yes
+ register: create_missing_param
+- assert:
+ that:
+ - create_missing_param is failed
+ - "'description' in create_missing_param.msg"
+ - "'state is present but all of the following are missing' in create_missing_param.msg"
+
+# Try creating without subnets
+- name: "Create a subnet group (no subnets)"
+ rds_subnet_group:
+ state: present
+ name: "{{ resource_prefix }}"
+ description: "{{ group_description }}"
+ ignore_errors: yes
+ register: create_missing_param
+- assert:
+ that:
+ - create_missing_param is failed
+ - "'subnets' in create_missing_param.msg"
+ - "'state is present but all of the following are missing' in create_missing_param.msg"
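+# Both failure messages come from the module argument spec: description and
+# subnets are mandatory when state=present, so the module fails before any
+# AWS call is made.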
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
new file mode 100644
index 00000000..0b4e3d1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
@@ -0,0 +1,221 @@
+---
+# XXX rds_subnet_group doesn't support check_mode yet
+
+# ============================================================
+# Basic creation
+- name: 'Create a subnet group'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: 'Create a subnet group (idempotency)'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update description
+
+- name: 'Update subnet group description'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: 'Update subnet group description (idempotency)'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: 'Restore subnet group description'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update subnets
+
+- name: 'Update subnet group list'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: 'Update subnet group list (idempotency)'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: 'Add more subnets to the subnet group list'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 4
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: 'Add more subnets to the subnet group list (idempotency)'
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 4
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Deletion
+
+- name: 'Delete a subnet group'
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: 'Delete a subnet group (idempotency)'
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/defaults/main.yml
new file mode 100644
index 00000000..f1cd2cb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for test_redshift
+redshift_cluster_name: '{{ resource_prefix }}'
+reshift_master_password: "th1s_is_A_test"
+redshift_master_username: "master_user"
+node_type: "dc2.large"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml
new file mode 100644
index 00000000..83504b29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/redshift/tasks/main.yml
@@ -0,0 +1,357 @@
+---
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
+
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ # ============================================================
+ - name: test failure with no parameters
+ redshift:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: command, identifier"'
+
+ # ============================================================
+ - name: test failure with only identifier
+ redshift:
+ identifier: '{{ redshift_cluster_name }}'
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with only identifier
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: command"'
+
+ # ============================================================
+ - name: test create with no identifier
+ redshift:
+ command: create
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no identifier
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: identifier"'
+
+ # ============================================================
+ - name: test create with missing node_type
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with missing node_type
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "command is create but all of the following are missing: node_type, username, password"'
+
+ # ============================================================
+
+ - name: test create with missing password
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert create failure with missing password
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "command is create but all of the following are missing: node_type, password"'
+
+ # ============================================================
+
+ - name: test create with missing username
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ password: "{{ reshift_master_password }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert create failure with missing username
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "command is create but all of the following are missing: node_type, username"'
+
+ # ============================================================
+
+ - name: test create with default params
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ wait: yes
+ wait_timeout: 1000
+ tags:
+ foo: bar
+ Tizio: Caio
+ register: result
+ - debug:
+ msg: "{{ result }}"
+ verbosity: 1
+ - name: assert create success
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.tags.foo == "bar"'
+ - 'result.cluster.tags.Tizio == "Caio"'
+
+ # ============================================================
+
+ - name: test create again with default params
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ tags:
+ foo: bar
+ Tizio: Caio
+ register: result
+
+ - name: assert no change gets made to the existing cluster
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.tags.foo == "bar"'
+ - 'result.cluster.tags.Tizio == "Caio"'
+ - 'result.cluster.tags | count() == 2'
+
+ # ============================================================
+
+ - name: test modify cluster
+ redshift:
+ command: modify
+ identifier: "{{ redshift_cluster_name }}"
+ new_cluster_identifier: "{{ redshift_cluster_name }}-modified"
+ enhanced_vpc_routing: True
+ wait: yes
+ wait_timeout: 1000
+ tags:
+ foo: bar
+ register: result
+
+ - name: assert cluster was modified
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
+ - 'result.cluster.enhanced_vpc_routing == True'
+ - 'result.cluster.tags | count() == 1'
+ - 'result.cluster.tags.foo == "bar"'
+
+ # ============================================================
+ - name: test delete with no cluster identifier
+ redshift:
+ command: delete
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no identifier
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: identifier"'
+
+ # ============================================================
+ - name: test delete with no snapshot id
+ redshift:
+ command: delete
+ identifier: "{{ redshift_cluster_name }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure for no snapshot identifier
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"'
+
+
+ # ============================================================
+ - name: test successful delete
+ redshift:
+ command: delete
+ identifier: "{{ redshift_cluster_name }}-modified"
+ skip_final_cluster_snapshot: true
+ wait: yes
+ wait_timeout: 1200
+ register: result
+
+ - name: assert delete
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+
+ - name: test create multi-node cluster with custom db-name
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ cluster_type: multi-node
+ number_of_nodes: 3
+ wait: yes
+ db_name: "integration_test"
+ wait_timeout: 1800
+ register: result
+
+ - name: assert create
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.db_name == "integration_test"'
+
+ # ============================================================
+
+ - name: test tag update on existing cluster
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ cluster_type: multi-node
+ number_of_nodes: 3
+ wait: yes
+ db_name: "integration_test"
+ tags:
+ foo: bar
+ wait_timeout: 1800
+ register: result
+
+
+ - name: assert tags change
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.db_name == "integration_test"'
+ - 'result.cluster.tags.foo == "bar"'
+
+
+ # ============================================================
+
+  - name: test appending tags with purge_tags false
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ cluster_type: multi-node
+ number_of_nodes: 3
+ wait: yes
+ db_name: "integration_test"
+ tags:
+ test1: value1
+ purge_tags: false
+ wait_timeout: 1800
+ register: result
+
+
+  - name: assert tags were appended
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.db_name == "integration_test"'
+ - 'result.cluster.tags.test1 == "value1"'
+ - 'result.cluster.tags.foo == "bar"'
+ - 'result.cluster.tags | count() == 2'
+
+
+
+ # ============================================================
+
+ - name: test no change to tags when tags is None
+ redshift:
+ command: create
+ identifier: "{{ redshift_cluster_name }}"
+ username: "{{ redshift_master_username }}"
+      password: "{{ redshift_master_password }}"
+ node_type: "{{ node_type }}"
+ cluster_type: multi-node
+ number_of_nodes: 3
+ wait: yes
+ db_name: "integration_test"
+ wait_timeout: 1800
+ register: result
+
+
+  - name: assert no change
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - 'result.cluster.db_name == "integration_test"'
+ - 'result.cluster.tags | count() == 2'
+
+
+ # ============================================================
+
+ - name: test successful delete of multi-node cluster
+ redshift:
+ command: delete
+ identifier: "{{ redshift_cluster_name }}"
+ skip_final_cluster_snapshot: true
+ wait: yes
+ wait_timeout: 1200
+ register: result
+
+ - name: assert delete
+ assert:
+ that:
+ - 'result.changed'
+
+ always:
+
+ - name: Remove cluster if tests failed
+ redshift:
+ command: delete
+ identifier: "{{ item }}"
+ skip_final_cluster_snapshot: true
+ wait: yes
+ wait_timeout: 1200
+ register: cleanup
+ ignore_errors: yes
+ retries: 10
+ delay: 10
+ until: cleanup is success
+ loop:
+ - "{{ redshift_cluster_name }}"
+ - "{{ redshift_cluster_name }}-modified"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/aliases
new file mode 100644
index 00000000..f6cc7ad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/aliases
@@ -0,0 +1,3 @@
+route53_info
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/defaults/main.yml
new file mode 100644
index 00000000..cc0d3b78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for route53 tests
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/tasks/main.yml
new file mode 100644
index 00000000..de332a7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/tasks/main.yml
@@ -0,0 +1,252 @@
+---
+# tasks file for Route53 integration tests
+
+- set_fact:
+ zone_one: '{{ resource_prefix | replace("-", "") }}.one.fakeansible.com.'
+ zone_two: '{{ resource_prefix | replace("-", "") }}.two.fakeansible.com.'
+- debug: msg='Set zones {{ zone_one }} and {{ zone_two }}'
+
+- name: Test basics (new zone, A and AAAA records)
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ route53:
+ region: null
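+      # Route 53 is a global service, so the region default from group/aws is
+      # cleared for the route53 module here.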
+ block:
+ - route53_zone:
+ zone: '{{ zone_one }}'
+ comment: Created in Ansible test {{ resource_prefix }}
+ register: z1
+
+ - assert:
+ that:
+ - z1 is success
+ - z1 is changed
+ - "z1.comment == 'Created in Ansible test {{ resource_prefix }}'"
+
+ - name: Get zone details
+ route53_info:
+ query: hosted_zone
+ hosted_zone_id: '{{ z1.zone_id }}'
+ hosted_zone_method: details
+ register: hosted_zones
+
+ - name: Assert newly created hosted zone only has NS and SOA records
+ assert:
+ that:
+ - hosted_zones.HostedZone.ResourceRecordSetCount == 2
+
+ - name: Create A record using zone fqdn
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'qdn_test.{{ zone_one }}'
+ type: A
+ value: 1.2.3.4
+ register: qdn
+ - assert:
+ that:
+ - qdn is not failed
+ - qdn is changed
+
+  - name: Create the same A record using the non-qualified zone name
+ route53:
+ state: present
+ zone: '{{ zone_one[:-1] }}'
+ record: 'qdn_test.{{ zone_one[:-1] }}'
+ type: A
+ value: 1.2.3.4
+ register: non_qdn
+ - assert:
+ that:
+ - non_qdn is not failed
+ - non_qdn is not changed
+
+ - name: Create A record using zone ID
+ route53:
+ state: present
+ hosted_zone_id: '{{ z1.zone_id }}'
+ record: 'zid_test.{{ zone_one }}'
+ type: A
+ value: 1.2.3.4
+ register: zid
+ - assert:
+ that:
+ - zid is not failed
+ - zid is changed
+
+ - name: Create a multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'order_test.{{ zone_one }}'
+ type: A
+ value:
+ - 4.5.6.7
+ - 1.2.3.4
+ register: mv_a_record
+ - assert:
+ that:
+ - mv_a_record is not failed
+ - mv_a_record is changed
+
+ - name: Create same multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'order_test.{{ zone_one }}'
+ type: A
+ value:
+        - 1.2.3.4
+        - 4.5.6.7
+ register: mv_a_record
+ - assert:
+ that:
+ - mv_a_record is not failed
+ - mv_a_record is not changed
+
+ - name: get Route53 A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: 'order_test.{{ zone_one }}'
+ max_items: 50
+ register: records
+ - assert:
+ that:
+ - records.ResourceRecordSets|length == 3
+ - records.ResourceRecordSets[0].ResourceRecords|length == 2
+ - records.ResourceRecordSets[0].ResourceRecords[0].Value == "4.5.6.7"
+ - records.ResourceRecordSets[0].ResourceRecords[1].Value == "1.2.3.4"
+
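+  # The route53 module refuses to change an existing record set unless
+  # 'overwrite' is set, which the two tasks below exercise.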
+  - name: Remove a member from multi-value A record without overwrite
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'order_test.{{ zone_one }}'
+ type: A
+ value:
+ - 4.5.6.7
+ register: del_a_record
+ ignore_errors: true
+ - name: This should fail, because `overwrite` is false
+ assert:
+ that:
+ - del_a_record is failed
+
+  - name: Remove a member from multi-value A record with overwrite
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'order_test.{{ zone_one }}'
+ overwrite: true
+ type: A
+ value:
+ - 4.5.6.7
+ register: del_a_record
+ ignore_errors: true
+ - name: This should not fail, because `overwrite` is true
+ assert:
+ that:
+ - del_a_record is not failed
+ - del_a_record is changed
+
+ - name: get Route53 zone A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: 'order_test.{{ zone_one }}'
+ max_items: 50
+ register: records
+ - assert:
+ that:
+ - records.ResourceRecordSets|length == 3
+ - records.ResourceRecordSets[0].ResourceRecords|length == 1
+ - records.ResourceRecordSets[0].ResourceRecords[0].Value == "4.5.6.7"
+
+ - name: Create a LetsEncrypt CAA record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issue "letsencrypt.org;"
+ - 0 issuewild "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - assert:
+ that:
+ - caa is not failed
+ - caa is changed
+
+ - name: Re-create the same LetsEncrypt CAA record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issue "letsencrypt.org;"
+ - 0 issuewild "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - assert:
+ that:
+ - caa is not failed
+ - caa is not changed
+
+ - name: Re-create the same LetsEncrypt CAA record in opposite-order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issuewild "letsencrypt.org;"
+ - 0 issue "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - name: This should not be changed, as CAA records are not order sensitive
+ assert:
+ that:
+ - caa is not failed
+ - caa is not changed
+
+
+ always:
+ - route53_info:
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ register: z1_records
+ - debug: var=z1_records
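+  # Deleting a record set requires passing back its current type and values,
+  # hence the map over ResourceRecords in the loop below.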
+  - name: Loop over A/AAAA/CNAME/CAA records and delete them
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+ loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}'
+ - name: Delete test zone one '{{ zone_one }}'
+ route53_zone:
+ state: absent
+ zone: '{{ zone_one }}'
+ register: delete_one
+ ignore_errors: yes
+ retries: 10
+ until: delete_one is not failed
+ - name: Delete test zone two '{{ zone_two }}'
+ route53_zone:
+ state: absent
+ zone: '{{ zone_two }}'
+ register: delete_two
+ ignore_errors: yes
+ retries: 10
+ until: delete_two is not failed
+ when: false
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/vars/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53/vars/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/tasks/main.yml
new file mode 100644
index 00000000..f6baa68f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/route53_zone/tasks/main.yml
@@ -0,0 +1,396 @@
+---
+- name: route53_zone tests
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: true
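+    # The '&aws_connection_info' anchor above is merged into each task below
+    # via '<<: *aws_connection_info' (standard YAML merge-key syntax).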
+
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ <<: *aws_connection_info
+ register: testing_vpc
+
+ # ============================================================
+ - name: Create a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - not output.private_zone
+
+ # ============================================================
+ - name: Create a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.check.public"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.check.public.'
+ - not output.private_zone
+
+ # ============================================================
+  - name: Do an idempotent update of a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - not output.private_zone
+
+  - name: Do an idempotent update of a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - not output.private_zone
+
+ # ============================================================
+ - name: Update comment of a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: updated comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated comment"
+
+ - name: Update comment of a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: updated comment for check
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated comment for check"
+
+ # ============================================================
+ - name: Delete public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ - name: Create a private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create a private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ # ============================================================
+  - name: Idempotent update of a private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+  - name: Idempotent update of a private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+ # ============================================================
+ - name: Update private zone comment
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: updated_comment
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated_comment"
+
+ - name: Update private zone comment (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: updated_comment check
+ state: present
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated_comment check"
+
+ # ============================================================
+ - name: Try to delete private zone without setting vpc_id and vpc_region
+ route53_zone:
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ - name: Try to delete private zone without setting vpc_id and vpc_region (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ # ============================================================
+  - name: Try to delete a public zone that does not exist
+ route53_zone:
+ zone: "{{ resource_prefix }}.publicfake"
+ comment: original comment
+ state: absent
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+  - name: Try to delete a public zone that does not exist (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.publicfake"
+ comment: original comment
+ state: absent
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ # ============================================================
+ - name: Delete private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ - name: Create a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ comment: this is an example
+ state: present
+ <<: *aws_connection_info
+ register: new_zone
+
+ # Delete zone using its id
+ - name: Delete zone using attribute hosted_zone_id (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ hosted_zone_id: "{{new_zone.zone_id}}"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete zone using attribute hosted_zone_id
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ hosted_zone_id: "{{new_zone.zone_id}}"
+ state: absent
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ always:
+ - name: Ensure public zone is deleted
+ route53_zone:
+ zone: "{{ item }}"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ with_items:
+ - "{{ resource_prefix }}.public"
+ - "{{ resource_prefix }}.public2"
+
+ - name: Ensure private zone is deleted
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/defaults/main.yml
new file mode 100644
index 00000000..ad2a4c68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for s3_bucket_notifications integration test
+lambda_function_name: '{{ resource_prefix }}'
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+lambda_role_name: 'ansible-test-{{ unique_id }}-s3-notifications'
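+# e.g. a hypothetical resource_prefix of 'ansible-test-123456-123' would yield
+# unique_id '123456-123' and role name 'ansible-test-123456-123-s3-notifications'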
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/lambda-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/lambda-trust-policy.json
new file mode 100644
index 00000000..fb84ae9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/lambda-trust-policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py
new file mode 100644
index 00000000..d0d08dae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/files/mini_lambda.py
@@ -0,0 +1,13 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
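+# Minimal handler used only as a notification target in these tests; the
+# response body itself is irrelevant to the S3 notification configuration.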
+def lambda_handler(event, context):
+ return {
+ 'statusCode': 200,
+ 'body': json.dumps('Hello from Lambda!')
+ }
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml
new file mode 100644
index 00000000..38ef3bb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_bucket_notification/tasks/main.yml
@@ -0,0 +1,326 @@
+- name: test add s3 bucket notification
+ collections:
+ - amazon.aws
+ - community.general
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "lambda-trust-policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: pause if role was created
+ pause:
+ seconds: 10
+ when: iam_role is changed
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_lambda.py
+ dest: '{{ output_dir }}/mini_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: function_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_lambda.py'
+ dest: '{{ output_dir }}/mini_lambda.zip'
+ - name: register bucket
+ s3_bucket:
+ name: '{{resource_prefix}}-bucket'
+ state: present
+ register: bucket_info
+ - name: register lambda
+ lambda:
+ name: '{{resource_prefix}}-lambda'
+ state: present
+ role: '{{ lambda_role_name }}'
+ runtime: python3.7
+ zip_file: '{{function_res.dest}}'
+ handler: lambda_function.lambda_handler
+ memory_size: '128'
+ timeout: '30'
+ register: lambda_info
+ - name: register notification without invoke permissions
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: images/
+ suffix: .jpg
+ register: result
+ ignore_errors: true
+ - name: assert nice message returned
+ assert:
+ that:
+ - result is failed
+ - result.msg != 'MODULE FAILURE'
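+  # S3 validates that it may invoke the target function before accepting the
+  # notification configuration, hence the explicit permission grant below.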
+ - name: Add invocation permission of Lambda function on AWS
+ lambda_policy:
+ function_name: '{{ lambda_info.configuration.function_arn }}'
+ statement_id: allow_lambda_invoke
+ action: lambda:InvokeFunction
+ principal: s3.amazonaws.com
+ source_arn: arn:aws:s3:::{{bucket_info.name}}
+ - name: register s3 bucket notification
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: images/
+ suffix: .jpg
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: test check_mode without change
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: images/
+ suffix: .jpg
+ register: result
+ check_mode: true
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+ - name: test check_mode change events
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ prefix: images/
+ suffix: .jpg
+ register: result
+ check_mode: true
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: test that check_mode didn't change events
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: images/
+ suffix: .jpg
+ register: result
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+ - name: test mutually exclusive parameters
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:Post
+ prefix: photos/
+ suffix: .gif
+ lambda_version: 0
+ lambda_alias: 0
+ register: result
+ ignore_errors: true
+ - name: assert task failed
+ assert:
+ that:
+ - result is failed
+ - 'result.msg == ''parameters are mutually exclusive: lambda_alias|lambda_version'''
+ - name: test configuration change on suffix
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: images/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: test configuration change on prefix
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: test configuration change on new events added
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ - s3:ObjectRestore:Post
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: test configuration change on events removed
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:Post
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+ - name: change events
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ - s3:ObjectRestore:Post
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: test that event order does not matter
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectRestore:Post
+ - s3:ObjectRemoved:*
+ - s3:ObjectCreated:*
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+ - name: test that configuration is the same as previous task
+ s3_bucket_notification:
+ state: present
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ lambda_function_arn: '{{ lambda_info.configuration.function_arn }}'
+ events:
+ - s3:ObjectCreated:*
+ - s3:ObjectRemoved:*
+ - s3:ObjectRestore:Post
+ prefix: photos/
+ suffix: .gif
+ register: result
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+ - name: test remove notification
+ s3_bucket_notification:
+ state: absent
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ register: result
+ - name: assert result.changed == True
+ assert:
+ that:
+ - result.changed == True
+  - name: test that notification is already removed
+ s3_bucket_notification:
+ state: absent
+ event_name: '{{resource_prefix}}-on_file_add_or_remove'
+ bucket_name: '{{resource_prefix}}-bucket'
+ register: result
+ - name: assert result.changed == False
+ assert:
+ that:
+ - result.changed == False
+ always:
+ - name: clean-up bucket
+ s3_bucket:
+ name: '{{resource_prefix}}-bucket'
+ state: absent
+ ignore_errors: true
+ - name: clean-up lambda
+ lambda:
+ name: '{{resource_prefix}}-lambda'
+ state: absent
+ ignore_errors: true
+  - name: clean-up role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
+
+- block:
+ - name: test with no parameters except state absent
+ s3_bucket_notification: state=absent
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - '"missing required arguments" in result.msg'
+  - name: test absent
+ s3_bucket_notification: state=absent
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - '"missing required arguments" in result.msg'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases
new file mode 100644
index 00000000..0cc87f1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group1
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml
new file mode 100644
index 00000000..f6dc2ab3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_lifecycle/tasks/main.yml
@@ -0,0 +1,435 @@
+---
+
+- block:
+
+ # ============================================================
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: true
+
+ # ============================================================
+ - name: Create simple s3_bucket
+ s3_bucket:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ resource_prefix }}-testbucket-ansible'
+ - not output.requester_pays
+ # ============================================================
+ - name: Create a lifecycle policy
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ expiration_days: 300
+ prefix: ''
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Create a lifecycle policy (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ expiration_days: 300
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a second lifecycle policy
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 30
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Create a second lifecycle policy (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 30
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Disable the second lifecycle policy
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ status: disabled
+ transition_days: 30
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Disable the second lifecycle policy (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ status: disabled
+ transition_days: 30
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Re-enable the second lifecycle policy
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ status: enabled
+ transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Re-enable the second lifecycle policy (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ status: enabled
+ transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Delete the second lifecycle policy
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ state: absent
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Delete the second lifecycle policy (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ state: absent
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a second lifecycle policy, with infrequent access
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 30
+ storage_class: standard_ia
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Create a second lifecycle policy, with infrequent access (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ storage_class: standard_ia
+ transition_days: 30
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a second lifecycle policy, with glacier
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+ - name: Create a second lifecycle policy, with glacier (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a lifecycle policy with infrequent access
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 30
+ storage_class: standard_ia
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - name: Create a second lifecycle policy, with glacier
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 300
+ prefix: /something
+ purge_transitions: false
+ <<: *aws_connection_info
+ register: output
+
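+  # With purge_transitions false, the glacier rule above is applied without
+  # removing the earlier standard_ia transition, so both idempotency re-runs
+  # below should report no change.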
+ - name: Create a lifecycle policy with infrequent access (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ storage_class: standard_ia
+ transition_days: 30
+ prefix: /something
+ purge_transitions: false
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ - name: Create a second lifecycle policy, with glacier (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 300
+ prefix: /something
+ purge_transitions: false
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a lifecycle policy, with noncurrent expiration
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_expiration_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+  - name: Create a lifecycle policy, with noncurrent expiration (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_expiration_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a lifecycle policy, with noncurrent transition
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+  - name: Create a lifecycle policy, with noncurrent transition (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+  - name: Create a lifecycle policy, with noncurrent transition to standard_ia
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_transition_days: 300
+ noncurrent_version_storage_class: standard_ia
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+  - name: Create a lifecycle policy, with noncurrent transition to standard_ia (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_storage_class: standard_ia
+ noncurrent_version_transition_days: 300
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ - name: Create a lifecycle policy, with noncurrent transitions
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_transitions:
+ - transition_days: 30
+ storage_class: standard_ia
+ - transition_days: 60
+ storage_class: onezone_ia
+ - transition_days: 90
+ storage_class: glacier
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ # ============================================================
+  - name: Create a lifecycle policy, with noncurrent transitions (idempotency)
+ s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ noncurrent_version_transitions:
+ - transition_days: 30
+ storage_class: standard_ia
+ - transition_days: 60
+ storage_class: onezone_ia
+ - transition_days: 90
+ storage_class: glacier
+ prefix: /something
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ # ============================================================
+ # test all the examples
+ # Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ expiration_days: 30
+ prefix: /logs/
+ status: enabled
+ <<: *aws_connection_info
+ state: present
+
+ # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_days: 7
+ expiration_days: 90
+ prefix: /logs/
+ status: enabled
+ <<: *aws_connection_info
+ state: present
+
+ # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
+ # Note that midnight GMT must be specified.
+ # Be sure to quote your date strings
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ transition_date: "2020-12-30T00:00:00.000Z"
+ expiration_date: "2030-12-30T00:00:00.000Z"
+ prefix: /logs/
+ status: enabled
+ <<: *aws_connection_info
+ state: present
+
+ # Disable the rule created above
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ prefix: /logs/
+ status: disabled
+ <<: *aws_connection_info
+ state: present
+
+ # Delete the lifecycle rule created above
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ <<: *aws_connection_info
+ prefix: /logs/
+ state: absent
+
+ # Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ prefix: /backups/
+ storage_class: standard_ia
+ transition_days: 31
+ state: present
+ <<: *aws_connection_info
+ status: enabled
+
+ # Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
+ - s3_lifecycle:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ prefix: /other_logs/
+ state: present
+ <<: *aws_connection_info
+ status: enabled
+ transitions:
+ - transition_days: 30
+ storage_class: standard_ia
+ - transition_days: 90
+ storage_class: glacier
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: "{{item}}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+ with_items:
+ - "{{ resource_prefix }}-testbucket-ansible"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/aliases
new file mode 100644
index 00000000..3431a6a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+#shippable/aws/group1
+# when running tests we saw an ~20% failure rate
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/defaults/main.yml
new file mode 100644
index 00000000..a0f9b7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+test_bucket: '{{ resource_prefix }}-testbucket'
+log_bucket_1: '{{ resource_prefix }}-logs-1'
+log_bucket_2: '{{ resource_prefix }}-logs-2'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml
new file mode 100644
index 00000000..3e7ffc7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_logging/tasks/main.yml
@@ -0,0 +1,205 @@
+---
+# Integration tests for s3_logging
+#
+# Notes:
+# - s3_logging doesn't support check_mode and the only output is 'changed'
+# - During initial testing we hit issues with boto reporting
+# "You must give the log-delivery group WRITE and READ_ACP permissions
+# to the target bucket"
+# a long term solution might be to port s3_logging to AnsibleAWSModule
+# so we can add retries
+#
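+# A per-task workaround (sketch only, not applied below) would be Ansible's
+# built-in retry loop, e.g.:
+#
+#   - s3_logging:
+#       state: present
+#       name: '{{ test_bucket }}'
+#       target_bucket: '{{ log_bucket_1 }}'
+#     register: result
+#     until: result is not failed
+#     retries: 5
+#     delay: 10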
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+
+ - name: Try to enable logging without providing target_bucket
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+
+ # ============================================================
+ - name: Create simple s3_bucket to be logged
+ s3_bucket:
+ state: present
+ name: '{{ test_bucket }}'
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == test_bucket
+
+ - name: Create simple s3_bucket as target for logs
+ s3_bucket:
+ state: present
+ name: '{{ log_bucket_1 }}'
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == log_bucket_1
+
+ - name: Create simple s3_bucket as second target for logs
+ s3_bucket:
+ state: present
+ name: '{{ log_bucket_2 }}'
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == log_bucket_2
+
+# ============================================================
+
+ - name: Enable logging
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_1 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Enable logging idempotency
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_1 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+# ============================================================
+
+ - name: Change logging bucket
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Change logging bucket idempotency
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+# ============================================================
+
+ - name: Change logging prefix
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ target_prefix: '/{{ resource_prefix }}/'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Change logging prefix idempotency
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ target_prefix: '/{{ resource_prefix }}/'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+# ============================================================
+
+ - name: Remove logging prefix
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Remove logging prefix idempotency
+ s3_logging:
+ state: present
+ name: '{{ test_bucket }}'
+ target_bucket: '{{ log_bucket_2 }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+# ============================================================
+
+ - name: Disable logging
+ s3_logging:
+ state: absent
+ name: '{{ test_bucket }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Disable logging idempotency
+ s3_logging:
+ state: absent
+ name: '{{ test_bucket }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+# ============================================================
+ always:
+ - name: Delete bucket being logged
+ s3_bucket:
+ name: '{{ test_bucket }}'
+ state: absent
+ ignore_errors: yes
+ - name: Delete first bucket containing logs
+ s3_bucket:
+ name: '{{ log_bucket_1 }}'
+ state: absent
+ ignore_errors: yes
+ - name: Delete second bucket containing logs
+ s3_bucket:
+ name: '{{ log_bucket_2 }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/aliases
new file mode 100644
index 00000000..fd132af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1 \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/defaults/main.yml
new file mode 100644
index 00000000..844e5cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+test_bucket: '{{ resource_prefix }}-testbucket'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml
new file mode 100644
index 00000000..ba5cce9e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/main.yml
@@ -0,0 +1,243 @@
+---
+# Integration tests for s3_metrics_configuration
+#
+# Notes:
+# - The module only outputs 'changed' since it's very simple
+#
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+ collections:
+ - amazon.aws
+ block:
+
+ # TODO: Until there's a module to fetch S3 metrics configuration info, awscli is needed
+ - name: Install awscli
+ pip:
+ state: present
+ name: awscli
+
+ # ============================================================
+ - name: Try to create metrics configuration for non-existing bucket
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: present
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+
+ # ============================================================
+ - name: Create simple s3_bucket to act upon it
+ s3_bucket:
+ name: '{{ test_bucket }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == test_bucket
+
+ # ============================================================
+ - name: Create a metrics configuration under check mode
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: present
+ check_mode: yes
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - metrics_info | selectattr('Id', 'search', 'EntireBucket') | list | length == 0
+
+ # ============================================================
+ - name: Create a metrics configuration that enables metrics for an entire bucket
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: present
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - metrics_info | selectattr('Id', 'search', 'EntireBucket') | list | length == 1
+
+ # ============================================================
+ - name: Create a metrics configuration idempotency under check mode
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: present
+ check_mode: yes
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ # ============================================================
+ - name: Create a metrics configuration idempotency
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ # ============================================================
+ - name: Put a metrics configuration that enables metrics for objects starting with a prefix
+ s3_metrics_configuration:
+ bucket_name: "{{ test_bucket }}"
+ id: Assets
+ filter_prefix: assets
+ state: present
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Prefix == 'assets'
+
+ # ============================================================
+ - name: Update existing metrics configuration under check mode
+ s3_metrics_configuration:
+ bucket_name: "{{ test_bucket }}"
+ id: Assets
+ filter_tag:
+ kind: Asset
+ state: present
+ check_mode: yes
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Prefix == 'assets'
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Tag is not defined
+
+ # ============================================================
+ - name: Update existing metrics configuration and enable metrics for objects with specific tag
+ s3_metrics_configuration:
+ bucket_name: "{{ test_bucket }}"
+ id: Assets
+ filter_tag:
+ kind: Asset
+ state: present
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Prefix is not defined
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Tag.Key == 'kind'
+ - (metrics_info | selectattr('Id', 'search', 'Assets') | list | first).Filter.Tag.Value == 'Asset'
+
+ # ============================================================
+ - name: Put a metrics configuration that enables metrics for objects that start with a particular prefix and have specific tags applied
+ s3_metrics_configuration:
+ bucket_name: "{{ test_bucket }}"
+ id: ImportantBlueDocuments
+ filter_prefix: documents
+ filter_tags:
+ priority: High
+ class: Blue
+ state: present
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - (metrics_info | selectattr('Id', 'search', 'ImportantBlueDocuments') | list | first).Filter.And.Prefix == 'documents'
+ - (metrics_info | selectattr('Id', 'search', 'ImportantBlueDocuments') | list | first).Filter.And.Tags[0].Key == 'priority'
+ - (metrics_info | selectattr('Id', 'search', 'ImportantBlueDocuments') | list | first).Filter.And.Tags[0].Value == 'High'
+ - (metrics_info | selectattr('Id', 'search', 'ImportantBlueDocuments') | list | first).Filter.And.Tags[1].Key == 'class'
+ - (metrics_info | selectattr('Id', 'search', 'ImportantBlueDocuments') | list | first).Filter.And.Tags[1].Value == 'Blue'
+
+ # ============================================================
+ - name: Delete metrics configuration in check mode
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: absent
+ check_mode: yes
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - metrics_info | selectattr('Id', 'search', 'EntireBucket') | list | length == 1 # still present
+
+ # ============================================================
+ - name: Delete metrics configuration
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: absent
+ register: result
+
+ - include_tasks: './s3_metrics_info.yml'
+
+ - assert:
+ that:
+ - result is changed
+ - metrics_info | selectattr('Id', 'search', 'EntireBucket') | list | length == 0
+
+ # ============================================================
+ - name: Try to delete non-existing metrics configuration
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}'
+ id: 'EntireBucket'
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ # ============================================================
+ - name: Try to delete metrics configuration for non-existing bucket
+ s3_metrics_configuration:
+ bucket_name: '{{ test_bucket }}-non-existing'
+ id: 'EntireBucket'
+ state: absent
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+
+ # ============================================================
+ always:
+ - name: Delete test bucket
+ s3_bucket:
+ name: '{{ test_bucket }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml
new file mode 100644
index 00000000..cca7cad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_metrics_configuration/tasks/s3_metrics_info.yml
@@ -0,0 +1,16 @@
+---
+# Utility tasks to list bucket metrics configurations
+# TODO: Update this when an s3_metrics_configuration_info module exists
+- name: List s3 bucket metrics configurations
+ command: >
+ aws s3api list-bucket-metrics-configurations
+ --bucket {{ test_bucket }}
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key | default(omit) }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key | default(omit) }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default(omit) }}"
+ AWS_DEFAULT_REGION: "{{ aws_region | default(omit) }}"
+ register: list_command_result
+
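+# The MetricsConfigurationList key is absent from the response when the
+# bucket has no metrics configurations, hence the default([]) below.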
+- set_fact:
+ metrics_info: "{{ (list_command_result.stdout | from_json)['MetricsConfigurationList'] | default([]) }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/aliases
new file mode 100644
index 00000000..efe35f38
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group1
+
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test1.txt b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test1.txt
new file mode 100644
index 00000000..f079749c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test1.txt
@@ -0,0 +1 @@
+test1 \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test2.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test2.yml
new file mode 100644
index 00000000..b80aba06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test2.yml
@@ -0,0 +1,2 @@
+---
+test2: example
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test3.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test3.json
new file mode 100644
index 00000000..23483fb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/files/test3.json
@@ -0,0 +1,3 @@
+{
+ "test3": "value"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml
new file mode 100644
index 00000000..965e1182
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/s3_sync/tasks/main.yml
@@ -0,0 +1,108 @@
+---
+- name: S3 bucket creation
+ collections:
+ - amazon.aws
+ - community.general
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # ============================================================
+ - name: Create simple s3_bucket
+ s3_bucket:
+ name: "{{ resource_prefix }}-testbucket-ansible"
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == resource_prefix ~ '-testbucket-ansible'
+ - not output.requester_pays
+ # ============================================================
+ - name: Prepare fixtures folder
+ file:
+ path: "{{ output_dir }}/s3_sync"
+ state: directory
+ mode: '0755'
+
+ - name: Prepare files to sync
+ copy:
+ src: "{{ item }}"
+ dest: "{{ output_dir }}/s3_sync/{{ item }}"
+ mode: preserve
+ with_items:
+ - test1.txt
+ - test2.yml
+ - test3.json
+
+ - name: Prepare file with size bigger than chunk size
+ shell: |
+ dd if=/dev/zero of=test4.txt bs=1M count=10
+ args:
+ chdir: "{{ output_dir }}/s3_sync"
+
+ - name: Sync files with remote bucket
+ s3_sync:
+ bucket: "{{ resource_prefix }}-testbucket-ansible"
+ file_root: "{{ output_dir }}/s3_sync"
+ register: output
+ - assert:
+ that:
+ - output is changed
+
+ # ============================================================
+ - name: Sync files already present
+ s3_sync:
+ bucket: "{{ resource_prefix }}-testbucket-ansible"
+ file_root: "{{ output_dir }}/s3_sync"
+ register: output
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ - name: Sync files with etag calculation
+ s3_sync:
+ bucket: "{{ resource_prefix }}-testbucket-ansible"
+ file_root: "{{ output_dir }}/s3_sync"
+ file_change_strategy: checksum
+ register: output
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ # DOCUMENTATION EXAMPLES
+ # ============================================================
+ - name: all the options
+ s3_sync:
+ bucket: "{{ resource_prefix }}-testbucket-ansible"
+ file_root: "{{ output_dir }}/s3_sync"
+ mime_map:
+ .yml: application/text
+ .json: application/text
+ key_prefix: config_files/web
+ file_change_strategy: force
+ permission: public-read
+ cache_control: "public, max-age=31536000"
+ include: "*"
+ exclude: "*.txt,.*"
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: "{{item}}"
+ state: absent
+ force: true
+ ignore_errors: yes
+ with_items:
+ - "{{ resource_prefix }}-testbucket-ansible"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/aliases
new file mode 100644
index 00000000..092d6ac6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+needs/file/contrib/inventory/ec2.py
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/ec2.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/ec2.sh
new file mode 100644
index 00000000..9ae9dee5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/ec2.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+# Wrapper to use the correct Python interpreter and support code coverage.
+ABS_SCRIPT=$(python -c "import os; print(os.path.abspath('../../../../contrib/inventory/ec2.py'))")
+cd "${OUTPUT_DIR}"
+python.py "${ABS_SCRIPT}" "$@"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/inventory_diff.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/inventory_diff.py
new file mode 100644
index 00000000..3aaeff50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/inventory_diff.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
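+# Diff helper: checks that the aws_ec2 plugin output (b) reproduces the
+# hosts, groups and hostvars of the contrib ec2.py script output (a).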
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+
+def check_hosts(contrib, plugin):
+ contrib_hosts = sorted(contrib['_meta']['hostvars'].keys())
+ plugin_hosts = sorted(plugin['_meta']['hostvars'].keys())
+ assert contrib_hosts == plugin_hosts
+ return contrib_hosts, plugin_hosts
+
+
+def check_groups(contrib, plugin):
+ contrib_groups = set(contrib.keys())
+ plugin_groups = set(plugin.keys())
+ missing_groups = contrib_groups.difference(plugin_groups)
+ if missing_groups:
+ print("groups: %s are missing from the plugin" % missing_groups)
+ assert not missing_groups
+ return contrib_groups, plugin_groups
+
+
+def check_host_vars(key, value, plugin, host):
+ # tags are a dict in the plugin
+ if key.startswith('ec2_tag'):
+ print('assert tag', key, value)
+ assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host'
+ btags = plugin['_meta']['hostvars'][host]['tags']
+ tagkey = key.replace('ec2_tag_', '')
+ assert tagkey in btags, '%s tag not in b file host tags' % tagkey
+ assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
+ else:
+ print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key))
+ assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host)
+ assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key])
+
+
+def main():
+ # a should be the source of truth (the script output)
+ a = sys.argv[1]
+ # b should be the thing to check (the plugin output)
+ b = sys.argv[2]
+
+ with open(a, 'r') as f:
+ adata = json.loads(f.read())
+ with open(b, 'r') as f:
+ bdata = json.loads(f.read())
+
+ # all hosts should be present obviously
+ ahosts, bhosts = check_hosts(adata, bdata)
+
+ # all groups should be present obviously
+ agroups, bgroups = check_groups(adata, bdata)
+
+ # check host vars can be reconstructed
+ for ahost in ahosts:
+ contrib_host_vars = adata['_meta']['hostvars'][ahost]
+ for key, value in contrib_host_vars.items():
+ check_host_vars(key, value, bdata, ahost)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/runme.sh b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/runme.sh
new file mode 100644
index 00000000..05772955
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/script_inventory_ec2/runme.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+pip install "python-dateutil>=2.1,<2.7.0" jmespath "Jinja2==2.10"
+
+# create boto3 symlinks
+ln -s "$(pwd)/lib/boto" "$(pwd)/lib/boto3"
+ln -s "$(pwd)/lib/boto" "$(pwd)/lib/botocore"
+
+# override boto's import path(s)
+export PYTHONPATH
+PYTHONPATH="$(pwd)/lib:$PYTHONPATH"
+
+#################################################
+# RUN THE SCRIPT
+#################################################
+
+# run the script first
+cat << EOF > "$OUTPUT_DIR/ec2.ini"
+[ec2]
+regions = us-east-1
+cache_path = $(pwd)/.cache
+cache_max_age = 0
+group_by_tag_none = False
+
+[credentials]
+aws_access_key_id = FOO
+aws_secret_access_key = BAR
+EOF
+
+ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i ./ec2.sh --list --output="$OUTPUT_DIR/script.out"
+RC=$?
+if [[ $RC != 0 ]]; then
+ exit $RC
+fi
+
+#################################################
+# RUN THE PLUGIN
+#################################################
+
+# run the plugin second
+export ANSIBLE_INVENTORY_ENABLED=aws_ec2
+export ANSIBLE_INVENTORY=test.aws_ec2.yml
+export AWS_ACCESS_KEY_ID=FOO
+export AWS_SECRET_ACCESS_KEY=BAR
+export ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never
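+# keep invalid group characters as-is so the plugin's group names can match
+# the contrib script's own sanitization (see the inventory config below)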
+
+cat << EOF > "$OUTPUT_DIR/test.aws_ec2.yml"
+plugin: aws_ec2
+cache: False
+use_contrib_script_compatible_sanitization: True
+strict: True
+regions:
+ - us-east-1
+hostnames:
+ - network-interface.addresses.association.public-ip
+ - dns-name
+filters:
+ instance-state-name: running
+compose:
+ # vars that don't exist anymore in any meaningful way
+ ec2_item: undefined | default("")
+ ec2_monitoring: undefined | default("")
+ ec2_previous_state: undefined | default("")
+ ec2_previous_state_code: undefined | default(0)
+ ec2__in_monitoring_element: undefined | default(false)
+ # the following three will be accessible again after #53645
+ ec2_requester_id: undefined | default("")
+ ec2_eventsSet: undefined | default("")
+ ec2_persistent: undefined | default(false)
+
+ # vars that change
+ ansible_host: public_ip_address
+ ec2_block_devices: dict(block_device_mappings | map(attribute='device_name') | map('basename') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))
+ ec2_dns_name: public_dns_name
+ ec2_group_name: placement['group_name']
+ ec2_id: instance_id
+ ec2_instance_profile: iam_instance_profile | default("")
+ ec2_ip_address: public_ip_address
+ ec2_kernel: kernel_id | default("")
+ ec2_monitored: monitoring['state'] in ['enabled', 'pending']
+ ec2_monitoring_state: monitoring['state']
+ ec2_account_id: owner_id
+ ec2_placement: placement['availability_zone']
+ ec2_ramdisk: ramdisk_id | default("")
+ ec2_reason: state_transition_reason
+ ec2_security_group_ids: security_groups | map(attribute='group_id') | list | sort | join(',')
+ ec2_security_group_names: security_groups | map(attribute='group_name') | list | sort | join(',')
+ ec2_state: state['name']
+ ec2_state_code: state['code']
+ ec2_state_reason: state_reason['message'] if state_reason is defined else ""
+ ec2_sourceDestCheck: source_dest_check | lower | string # deliberately butchered snake_case, not a typo.
+
+ # vars that just need ec2_ prefix
+ ec2_ami_launch_index: ami_launch_index | string
+ ec2_architecture: architecture
+ ec2_client_token: client_token
+ ec2_ebs_optimized: ebs_optimized
+ ec2_hypervisor: hypervisor
+ ec2_image_id: image_id
+ ec2_instance_type: instance_type
+ ec2_key_name: key_name
+ ec2_launch_time: 'launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")'
+ ec2_platform: platform | default("")
+ ec2_private_dns_name: private_dns_name
+ ec2_private_ip_address: private_ip_address
+ ec2_public_dns_name: public_dns_name
+ ec2_region: placement['region']
+ ec2_root_device_name: root_device_name
+ ec2_root_device_type: root_device_type
+ ec2_spot_instance_request_id: spot_instance_request_id | default("")
+ ec2_subnet_id: subnet_id
+ ec2_virtualization_type: virtualization_type
+ ec2_vpc_id: vpc_id
+ tags: dict(tags.keys() | map('regex_replace', '[^A-Za-z0-9\_]', '_') | list | zip(tags.values() | list))
+
+keyed_groups:
+ - key: '"ec2"'
+ separator: ""
+ - key: 'instance_id'
+ separator: ""
+ - key: tags
+ prefix: tag
+ - key: key_name | regex_replace('-', '_')
+ prefix: key
+ - key: placement['region']
+ separator: ""
+ - key: placement['availability_zone']
+ separator: ""
+ - key: platform | default('undefined')
+ prefix: platform
+ - key: vpc_id | regex_replace('-', '_')
+ prefix: vpc_id
+ - key: instance_type
+ prefix: type
+ - key: "image_id | regex_replace('-', '_')"
+ separator: ""
+ - key: security_groups | map(attribute='group_name') | map("regex_replace", "-", "_") | list
+ prefix: security_group
+EOF
+
+ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i "$OUTPUT_DIR/test.aws_ec2.yml" --list --output="$OUTPUT_DIR/plugin.out"
+
+#################################################
+# DIFF THE RESULTS
+#################################################
+
+./inventory_diff.py "$OUTPUT_DIR/script.out" "$OUTPUT_DIR/plugin.out"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/defaults/main.yml
new file mode 100644
index 00000000..fb1f88b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+resource_prefix: 'ansible-testing-'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/tasks/common.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/tasks/common.yml
new file mode 100644
index 00000000..bf23f539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/tasks/common.yml
@@ -0,0 +1,119 @@
+---
+
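+# Generic parameter/credential handling checks: the including play supplies
+# module_name and ec2_key_name; ec2_region and ec2_url come from this role's vars.
+#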
+# ============================================================
+- name: test with no parameters
+ action: "{{module_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+# ============================================================
+- name: test with only name
+ action: "{{module_name}} name={{ec2_key_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with only 'name'
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Either region or ec2_url must be specified"'
+
+# ============================================================
+- name: test invalid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='asdf querty 1234'"
+ register: result
+ ignore_errors: true
+
+- name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("value of region must be one of:")'
+
+# ============================================================
+- name: test valid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='{{ec2_region}}'"
+ register: result
+ ignore_errors: true
+
+- name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test environment variable EC2_REGION
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+- name: assert environment variable EC2_REGION
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test invalid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+- name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test valid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: '{{ec2_url}}'
+ register: result
+ ignore_errors: true
+
+- name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test credentials from environment
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+- name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
+
+# ============================================================
+- name: test credential parameters
+ action: "{{module_name}} name='{{ec2_key_name}}' ec2_region='{{ec2_region}}' ec2_access_key=bogus_access_key ec2_secret_key=bogus_secret_key"
+ register: result
+ ignore_errors: true
+
+- name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/vars/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/vars/main.yml
new file mode 100644
index 00000000..3d7209ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_ec2/vars/main.yml
@@ -0,0 +1,3 @@
+---
+ec2_url: ec2.amazonaws.com
+ec2_region: us-east-1
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..229037c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..39872d74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..1e0f51b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
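+# Overwrite the registered result with just the path for convenient reuse.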
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..f8df391b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files)}}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
new file mode 100644
index 00000000..32f372d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
@@ -0,0 +1,4 @@
+- name: delete temporary directory (windows)
+ ansible.windows.win_file:
+ path: '{{ remote_tmp_dir }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
new file mode 100644
index 00000000..317c146d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
@@ -0,0 +1,10 @@
+- name: create temporary directory
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory (windows)
+ ansible.windows.win_tempfile:
+ state: directory
+ suffix: .test
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: '{{ remote_tmp_dir.path }}'
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/defaults/main.yml
new file mode 100644
index 00000000..59ef6564
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/defaults/main.yml
@@ -0,0 +1 @@
+sns_topic_name: "{{ resource_prefix }}-topic"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml
new file mode 100644
index 00000000..bc39b9ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns/tasks/main.yml
@@ -0,0 +1,50 @@
+- name: set up AWS connection info
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - amazon.aws
+
+ block:
+ - name: Create an SNS topic
+ sns_topic:
+ name: "{{ sns_topic_name }}"
+ display_name: "Test topic"
+ register: sns_topic
+
+ - name: Publish to the topic by name
+ sns:
+ topic: "{{ sns_topic_name }}"
+ subject: Test message
+ msg: Default test message
+ http: Test message for HTTP
+ https: Test message for HTTPS
+ email: Test message for email
+ email_json: Test message for email-json
+ sms: Short test message for SMS
+ sqs: Test message for SQS
+ application: Test message for apps
+ lambda: Test message for Lambda
+ register: result
+
+ - name: Check for expected result structure
+ assert:
+ that:
+ - result is not changed
+ - "'message_id' in result"
+
+ - name: Publish to the topic by ARN
+ sns:
+ topic: "{{ sns_topic.sns_arn }}"
+ subject: Second test message
+ msg: Simple test message
+
+ always:
+ - name: Remove topic
+ sns_topic:
+ name: "{{ sns_topic_name }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/defaults/main.yml
new file mode 100644
index 00000000..4082236c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/defaults/main.yml
@@ -0,0 +1,15 @@
+sns_topic_topic_name: "{{ resource_prefix }}-topic"
+sns_topic_subscriptions:
+ - endpoint: "{{ sns_topic_subscriber_arn }}"
+ protocol: "lambda"
+sns_topic_third_party_topic_arn: "arn:aws:sns:us-east-1:806199016981:AmazonIpSpaceChanged"
+sns_topic_third_party_region: "{{ sns_topic_third_party_topic_arn.split(':')[3] }}"
+sns_topic_lambda_function: "sns_topic_lambda"
+sns_topic_lambda_name: "{{ resource_prefix }}-{{ sns_topic_lambda_function }}"
+# IAM role names have to be less than 64 characters
+# The 8-digit identifier at the end of resource_prefix helps determine
+# during which test something was created and allows tests to run in parallel.
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
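+# e.g. resource_prefix "shippable-123456-123" yields unique_id "123456-123"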
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+sns_topic_lambda_role: "ansible-test-{{ unique_id }}-sns-lambda"
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/lambda-trust-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/lambda-trust-policy.json
new file mode 100644
index 00000000..fb84ae9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/lambda-trust-policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
new file mode 100644
index 00000000..98f65783
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
@@ -0,0 +1,9 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def handler(event, context):
+ print(event)
+ return True
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml
new file mode 100644
index 00000000..4d494f2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/tasks/main.yml
@@ -0,0 +1,303 @@
+- module_defaults:
+ group/aws:
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_access_key: '{{ aws_access_key }}'
+ security_token: '{{ security_token|default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ block:
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ sns_topic_lambda_role }}'
+ assume_role_policy_document: '{{ lookup("file", "lambda-trust-policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: pause if role was created
+ pause:
+ seconds: 10
+ when: iam_role is changed
+
+ - name: create topic
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My topic name
+ register: sns_topic_create
+ - name: assert that creation worked
+ assert:
+ that:
+ - sns_topic_create.changed
+ - name: set sns_arn fact
+ set_fact:
+ sns_arn: '{{ sns_topic_create.sns_arn }}'
+ - name: create topic again (expect changed=False)
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My topic name
+ register: sns_topic_no_change
+ - name: assert that recreation had no effect
+ assert:
+ that:
+ - not sns_topic_no_change.changed
+ - sns_topic_no_change.sns_arn == sns_topic_create.sns_arn
+ - name: update display name
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ register: sns_topic_update_name
+ - name: assert that updating name worked
+ assert:
+ that:
+ - sns_topic_update_name.changed
+ - sns_topic_update_name.sns_topic.display_name == "My new topic name"
+ - name: add policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ policy: '{{ lookup(''template'', ''initial-policy.json'') }}'
+ register: sns_topic_add_policy
+ - name: assert that adding policy worked
+ assert:
+ that:
+ - sns_topic_add_policy.changed
+ - name: rerun same policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ policy: '{{ lookup(''template'', ''initial-policy.json'') }}'
+ register: sns_topic_rerun_policy
+ - name: assert that rerunning policy had no effect
+ assert:
+ that:
+ - not sns_topic_rerun_policy.changed
+ - name: update policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ policy: '{{ lookup(''template'', ''updated-policy.json'') }}'
+ register: sns_topic_update_policy
+ - name: assert that updating policy worked
+ assert:
+ that:
+ - sns_topic_update_policy.changed
+ - name: add delivery policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 20
+ maxDelayTarget: 20
+ numRetries: 3
+ numMaxDelayRetries: 0
+ numNoDelayRetries: 0
+ numMinDelayRetries: 0
+ backoffFunction: linear
+ register: sns_topic_add_delivery_policy
+ - name: assert that adding delivery policy worked
+ vars:
+ delivery_policy: '{{ sns_topic_add_delivery_policy.sns_topic.delivery_policy | from_json }}'
+ assert:
+ that:
+ - sns_topic_add_delivery_policy.changed
+ - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 20
+ - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 20
+ - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 3
+ - name: rerun same delivery policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 20
+ maxDelayTarget: 20
+ numRetries: 3
+ numMaxDelayRetries: 0
+ numNoDelayRetries: 0
+ numMinDelayRetries: 0
+ backoffFunction: linear
+ register: sns_topic_rerun_delivery_policy
+ - name: assert that rerunning delivery_policy had no effect
+ vars:
+ delivery_policy: '{{ sns_topic_rerun_delivery_policy.sns_topic.delivery_policy | from_json }}'
+ assert:
+ that:
+ - not sns_topic_rerun_delivery_policy.changed
+ - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 20
+ - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 20
+ - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 3
+ - name: rerun a slightly different delivery policy
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 40
+ maxDelayTarget: 40
+ numRetries: 6
+ numMaxDelayRetries: 0
+ numNoDelayRetries: 0
+ numMinDelayRetries: 0
+ backoffFunction: linear
+ register: sns_topic_rerun_delivery_policy
+ - name: assert that rerunning delivery_policy worked
+ vars:
+ delivery_policy: '{{ sns_topic_rerun_delivery_policy.sns_topic.delivery_policy | from_json }}'
+ assert:
+ that:
+ - sns_topic_rerun_delivery_policy.changed
+ - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 40
+ - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 40
+ - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 6
+ - name: create temp dir
+ tempfile:
+ state: directory
+ register: tempdir
+ - name: ensure zip file exists
+ archive:
+ path: '{{ lookup(''first_found'', sns_topic_lambda_function) }}'
+ dest: '{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip'
+ format: zip
+ - name: create lambda for subscribing (only auto-subscribing target available)
+ lambda:
+ name: '{{ sns_topic_lambda_name }}'
+ state: present
+ zip_file: '{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip'
+ runtime: python2.7
+ role: '{{ sns_topic_lambda_role }}'
+ handler: '{{ sns_topic_lambda_function }}.handler'
+ register: lambda_result
+ - set_fact:
+ sns_topic_subscriber_arn: '{{ lambda_result.configuration.function_arn }}'
+ - name: subscribe to topic
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ purge_subscriptions: false
+ subscriptions: '{{ sns_topic_subscriptions }}'
+ register: sns_topic_subscribe
+ - name: assert that subscribing worked
+ assert:
+ that:
+ - sns_topic_subscribe.changed
+ - sns_topic_subscribe.sns_topic.subscriptions|length == 1
+ - name: run again with purge_subscriptions set to false
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ purge_subscriptions: false
+ register: sns_topic_no_purge
+ - name: assert that not purging subscriptions had no effect
+ assert:
+ that:
+ - not sns_topic_no_purge.changed
+ - sns_topic_no_purge.sns_topic.subscriptions|length == 1
+ - name: run again with purge_subscriptions set to true
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ display_name: My new topic name
+ purge_subscriptions: true
+ register: sns_topic_purge
+ - name: assert that purging subscriptions worked
+ assert:
+ that:
+ - sns_topic_purge.changed
+ - sns_topic_purge.sns_topic.subscriptions|length == 0
+ - name: delete topic
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ state: absent
+ - name: no-op with third party topic (effectively get existing subscriptions)
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ region: '{{ sns_topic_third_party_region }}'
+ register: third_party_topic
+ - name: subscribe to third party topic
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ subscriptions: '{{ sns_topic_subscriptions }}'
+ region: '{{ sns_topic_third_party_region }}'
+ register: third_party_topic_subscribe
+ - name: assert that subscribing worked
+ assert:
+ that:
+ - third_party_topic_subscribe is changed
+ - (third_party_topic_subscribe.sns_topic.subscriptions|length) - (third_party_topic.sns_topic.subscriptions|length) == 1
+ - name: attempt to change name of third party topic
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ display_name: This should not work
+ subscriptions: '{{ sns_topic_subscriptions }}'
+ region: '{{ sns_topic_third_party_region }}'
+ ignore_errors: true
+ register: third_party_name_change
+ - name: assert that attempting to change display name does not work
+ assert:
+ that:
+ - third_party_name_change is failed
+ - name: unsubscribe from third party topic (purge_subscriptions defaults to true)
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ subscriptions: '{{ third_party_topic.sns_topic.subscriptions }}'
+ region: '{{ sns_topic_third_party_region }}'
+ register: third_party_unsubscribe
+ - name: assert that unsubscribing from third party topic works
+ assert:
+ that:
+ - third_party_unsubscribe.changed
+ - third_party_topic.sns_topic.subscriptions|length == third_party_unsubscribe.sns_topic.subscriptions|length
+ - name: attempt to delete third party topic
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ state: absent
+ subscriptions: '{{ sns_topic_subscriptions }}'
+ region: '{{ sns_topic_third_party_region }}'
+ ignore_errors: true
+ register: third_party_deletion
+ - name: no-op after third party deletion
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ region: '{{ sns_topic_third_party_region }}'
+ register: third_party_deletion_facts
+ - name: assert that attempting to delete third party topic does not work and preserves subscriptions
+ assert:
+ that:
+ - third_party_deletion is failed
+ - third_party_topic.sns_topic.subscriptions|length == third_party_deletion_facts.sns_topic.subscriptions|length
+ always:
+ - name: announce teardown start
+ debug:
+ msg: '************** TEARDOWN STARTS HERE *******************'
+ - name: remove topic
+ sns_topic:
+ name: '{{ sns_topic_topic_name }}'
+ state: absent
+ ignore_errors: true
+ - name: unsubscribe from third party topic
+ sns_topic:
+ name: '{{ sns_topic_third_party_topic_arn }}'
+ subscriptions: []
+ purge_subscriptions: true
+ region: '{{ sns_topic_third_party_region }}'
+ ignore_errors: true
+ - name: remove lambda
+ lambda:
+ name: '{{ sns_topic_lambda_name }}'
+ state: absent
+ ignore_errors: true
+ - name: remove tempdir
+ file:
+ path: '{{ tempdir.path }}'
+ state: absent
+ when: tempdir is defined
+ ignore_errors: true
+ - name: remove iam role
+ iam_role:
+ name: '{{ sns_topic_lambda_role }}'
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/initial-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/initial-policy.json
new file mode 100644
index 00000000..235c5995
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/initial-policy.json
@@ -0,0 +1,20 @@
+{
+ "Version":"2012-10-17",
+ "Id":"SomePolicyId",
+ "Statement" :[
+ {
+ "Sid":"Statement1",
+ "Effect":"Allow",
+ "Principal" :{
+ "AWS":"{{ sns_arn.split(':')[4] }}"
+ },
+ "Action":["sns:Subscribe"],
+ "Resource": "{{ sns_arn }}",
+ "Condition" :{
+ "StringEquals" :{
+ "sns:Protocol":"email"
+ }
+ }
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/updated-policy.json b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/updated-policy.json
new file mode 100644
index 00000000..c796bb4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sns_topic/templates/updated-policy.json
@@ -0,0 +1,20 @@
+{
+ "Version":"2012-10-17",
+ "Id":"SomePolicyId",
+ "Statement" :[
+ {
+ "Sid":"ANewSid",
+ "Effect":"Allow",
+ "Principal" :{
+ "AWS":"{{ sns_arn.split(':')[4] }}"
+ },
+ "Action":["sns:Subscribe"],
+ "Resource": "{{ sns_arn }}",
+ "Condition" :{
+ "StringEquals" :{
+ "sns:Protocol":"email"
+ }
+ }
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/defaults/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/defaults/main.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml
new file mode 100644
index 00000000..b689c9eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sqs_queue/tasks/main.yml
@@ -0,0 +1,106 @@
+---
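+# Queue names get a random suffix so repeated or parallel runs don't collide.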
+- name: Main test block
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - block:
+ - name: Test creating SQS queue
+ sqs_queue:
+ name: "{{ resource_prefix }}{{ 1000 | random }}"
+ register: create_result
+ - name: Assert SQS queue created
+ assert:
+ that:
+ - create_result.changed
+ - create_result.region == aws_region
+ always:
+ - name: Test deleting SQS queue
+ sqs_queue:
+ name: "{{ create_result.name }}"
+ state: absent
+ register: delete_result
+ retries: 3
+ delay: 3
+ until: delete_result.changed
+ - name: Assert SQS queue deleted
+ assert:
+ that:
+ - delete_result.changed
+ - name: Test delete SQS queue that doesn't exist
+ sqs_queue:
+ name: "{{ resource_prefix }}{{ 1000 | random }}"
+ state: absent
+ register: delete_result
+ - name: Assert delete non-existent queue returns cleanly
+ assert:
+ that:
+ - not delete_result.changed
+ - name: Test queue features
+ block:
+ - name: Test create queue with attributes
+ sqs_queue:
+ name: "{{ resource_prefix }}{{ 1000 | random }}"
+ default_visibility_timeout: 900
+ delivery_delay: 900
+ maximum_message_size: 9009
+ message_retention_period: 900
+ receive_message_wait_time: 10
+ policy:
+ Version: "2012-10-17"
+ Statement:
+ Effect: Allow
+ Action: "*"
+ register: create_result
+ - name: Assert queue created with configuration
+ assert:
+ that:
+ - create_result.changed
+ - create_result.default_visibility_timeout == 900
+ - create_result.delivery_delay == 900
+ - create_result.maximum_message_size == 9009
+ - create_result.message_retention_period == 900
+ - create_result.receive_message_wait_time == 10
+ - create_result.policy.Version == "2012-10-17"
+ - create_result.policy.Statement[0].Effect == "Allow"
+ - create_result.policy.Statement[0].Action == "*"
+ always:
+ - name: Cleaning up queue
+ sqs_queue:
+ name: "{{ create_result.name }}"
+ state: absent
+ register: delete_result
+ retries: 3
+ delay: 3
+ until: delete_result.changed
+ - name: Test queue with redrive
+ block:
+ - name: Creating dead letter queue
+ sqs_queue:
+ name: "{{ resource_prefix }}{{ 1000 | random }}"
+ register: dead_letter_queue
+ - name: Test create queue with redrive_policy
+ sqs_queue:
+ name: "{{ resource_prefix }}{{ 1000 | random }}"
+ redrive_policy:
+ maxReceiveCount: 5
+ deadLetterTargetArn: "{{ dead_letter_queue.queue_arn }}"
+ register: create_result
+ - name: Assert queue created with configuration
+ assert:
+ that:
+ - create_result.changed
+ always:
+ - name: Cleaning up queue
+ sqs_queue:
+ name: "{{ item.name }}"
+ state: absent
+ register: delete_result
+ retries: 3
+ delay: 3
+ with_items:
+ - { name: "{{ create_result.name }}" }
+ - { name: "{{ dead_letter_queue.name }}" }
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml
new file mode 100644
index 00000000..34545493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/tasks/main.yml
@@ -0,0 +1,384 @@
+---
+# tasks file for sts_assume_role
+
+- block:
+
+ # ============================================================
+ # TODO create simple ansible sts_get_caller_identity module
+ - blockinfile:
+ path: "{{ output_dir }}/sts.py"
+ create: yes
+ block: |
+ #!/usr/bin/env python
+ import boto3
+ sts = boto3.client('sts')
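+ # get_caller_identity() requires no extra permissions and returns the
+ # Account, Arn and UserId of the credentials in use.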
+ response = sts.get_caller_identity()
+ print(response['Account'])
+
+ - name: get the aws account id
+ command: "{{ ansible_python.executable }} '{{ output_dir }}/sts.py'"
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token }}"
+ register: result
+
+ - name: register account id
+ set_fact:
+ aws_account: "{{ result.stdout | replace('\n', '') }}"
+
+ # ============================================================
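+ # The trust policy template renders the account id gathered above so that
+ # principals in this same account may assume the test role.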
+ - name: create test iam role
+ iam_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ create_instance_profile: False
+ managed_policy:
+ - arn:aws:iam::aws:policy/IAMReadOnlyAccess
+ state: present
+ register: test_role
+
+ # ============================================================
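+ # IAM is eventually consistent: a freshly created role may not be assumable
+ # immediately, hence the fixed delay below.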
+ - name: pause to ensure role exists before using
+ pause:
+ seconds: 30
+
+ # ============================================================
+ - name: test with no parameters
+ sts_assume_role:
+ register: result
+ ignore_errors: true
+
+ - name: assert with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments:' in result.msg"
+
+ # ============================================================
+ - name: test with empty parameters
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn:
+ role_session_name:
+ policy:
+ duration_seconds:
+ external_id:
+ mfa_token:
+ mfa_serial_number:
+ register: result
+ ignore_errors: true
+
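+ # Depending on where validation fails, the error text surfaces in result.msg
+ # (module-side validation) or in result.module_stderr (a botocore traceback),
+ # so each negative test asserts on whichever field is present.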
+ - name: assert with empty parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'Missing required parameter in input:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert with empty parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must have length greater than or equal to 20' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test with only 'role_arn' parameter
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert with only 'role_arn' parameter
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments: role_session_name' in result.msg"
+
+ # ============================================================
+ - name: test with only 'role_session_name' parameter
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ role_session_name: "AnsibleTest"
+ register: result
+ ignore_errors: true
+
+ - name: assert with only 'role_session_name' parameter
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments: role_arn' in result.msg"
+
+ # ============================================================
+ - name: test assume role with invalid policy
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: "AnsibleTest"
+ policy: "invalid policy"
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid policy
+ assert:
+ that:
+ - 'result.failed'
+ - "'The policy is not in the valid JSON format.' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid policy
+ assert:
+ that:
+ - 'result.failed'
+ - "'The policy is not in the valid JSON format.' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid duration seconds
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ duration_seconds: invalid duration
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid duration seconds
+ assert:
+ that:
+ - result is failed
+ - 'result.msg is search("argument \w+ is of type <.*> and we were unable to convert to int: <.*> cannot be converted to an int")'
+
+ # ============================================================
+ - name: test assume role with invalid external id
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ external_id: invalid external id
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid external id
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid external id
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid mfa serial number
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ mfa_serial_number: invalid serial number
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid mfa serial number
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid mfa serial number
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid mfa token code
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ mfa_token: invalid token code
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid mfa token code
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid mfa token code
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid role_arn
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: invalid role arn
+ role_session_name: AnsibleTest
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid role_arn
+ assert:
+ that:
+ - result.failed
+ - "'Invalid length for parameter RoleArn' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid role_arn
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must have length greater than or equal to 20' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assuming a non-existent sts role
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "arn:aws:iam::123456789:role/non-existing-role"
+ role_session_name: "AnsibleTest"
+ register: result
+ ignore_errors: true
+
+ - name: assert assuming a non-existent sts role fails
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: sts:AssumeRole' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assuming a non-existent sts role fails
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: sts:AssumeRole' in result.msg"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ register: assumed_role
+
+ - name: assert assume role
+ assert:
+ that:
+ - 'not assumed_role.failed'
+ - "'sts_creds' in assumed_role"
+ - "'access_key' in assumed_role.sts_creds"
+ - "'secret_key' in assumed_role.sts_creds"
+ - "'session_token' in assumed_role.sts_creds"
+
+ # ============================================================
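+ # From here on, the temporary credentials returned in sts_creds replace the
+ # long-lived keys, proving what the assumed role can and cannot do.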
+ - name: test that assumed credentials have IAM read-only access
+ iam_role:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ region: "{{ aws_region }}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ create_instance_profile: False
+ state: present
+ register: result
+
+ - name: assert assumed role with permitted read-only action (expect changed=false)
+ assert:
+ that:
+ - 'not result.failed'
+ - 'not result.changed'
+ - "'iam_role' in result"
+
+ # ============================================================
+ - name: test assumed role with unprivileged action
+ iam_role:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ region: "{{ aws_region }}"
+ name: "ansible-test-sts-{{ resource_prefix }}-new"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert assumed role with unprivileged action (expect failure)
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: iam:CreateRole' in result.msg"
+ # runs on Python2
+ when: result.module_stderr is not defined
+
+ - name: assert assumed role with unprivileged action (expect failure)
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: iam:CreateRole' in result.module_stderr"
+ # runs on Python3
+ when: result.module_stderr is defined
+
+ # ============================================================
+ always:
+
+ - name: delete test iam role
+ iam_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ managed_policy:
+ - arn:aws:iam::aws:policy/IAMReadOnlyAccess
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2
new file mode 100644
index 00000000..559562fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_account }}:root"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+} \ No newline at end of file