Diffstat (limited to 'collections-debian-merged/ansible_collections/community/aws/plugins')
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/connection/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/connection/aws_ssm.py | 592
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm.py | 395
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_facts.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_info.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_api_gateway.py | 368
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_application_scaling_policy.py | 538
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_compute_environment.py | 485
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_definition.py | 456
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_queue.py | 307
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codebuild.py | 405
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codecommit.py | 247
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codepipeline.py | 317
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregation_authorization.py | 159
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregator.py | 228
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_delivery_channel.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_recorder.py | 209
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_rule.py | 271
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_confirm_connection.py | 155
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_connection.py | 344
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_gateway.py | 369
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_link_aggregation_group.py | 466
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_virtual_interface.py | 515
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_eks_cluster.py | 303
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_elasticbeanstalk_app.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_connection.py | 335
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_job.py | 371
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_inspector_target.py | 246
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms.py | 1076
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_facts.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_info.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_facts.py | 98
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_info.py | 98
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_facts.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_info.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_cors.py | 167
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_secret.py | 401
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity.py | 543
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity_policy.py | 197
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_rule_set.py | 251
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_facts.py | 359
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_info.py | 359
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py | 260
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine.py | 232
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine_execution.py | 193
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_condition.py | 733
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_facts.py | 146
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_info.py | 146
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_rule.py | 357
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py | 361
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py | 85
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py | 725
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py | 2258
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_facts.py | 715
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_info.py | 715
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py | 274
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py | 278
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudtrail.py | 607
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchevent_rule.py | 460
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group.py | 315
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_facts.py | 131
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_info.py | 131
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py | 222
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/data_pipeline.py | 638
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_endpoint.py | 468
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py | 234
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_table.py | 512
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py | 223
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg.py | 1805
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_facts.py | 410
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_info.py | 410
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_lifecycle_hook.py | 251
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py | 255
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_facts.py | 139
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py | 139
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip.py | 638
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_facts.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_info.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb.py | 367
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_facts.py | 253
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_info.py | 253
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance.py | 1840
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_facts.py | 561
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_info.py | 561
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py | 707
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc.py | 695
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_facts.py | 224
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_find.py | 217
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_info.py | 224
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_metric_alarm.py | 406
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py | 206
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_facts.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_scaling_policy.py | 387
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py | 195
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py | 573
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py | 188
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint.py | 370
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_facts.py | 189
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_info.py | 189
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw.py | 291
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_facts.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py | 632
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_facts.py | 222
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py | 222
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py | 1004
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_facts.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_info.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py | 435
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_facts.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table.py | 744
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_facts.py | 130
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_info.py | 130
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py | 570
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_facts.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py | 778
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_facts.py | 217
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py | 217
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_win_password.py | 212
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_attribute.py | 303
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_cluster.py | 227
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_ecr.py | 568
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service.py | 862
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_facts.py | 254
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_info.py | 254
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_tag.py | 222
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_task.py | 443
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py | 518
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_facts.py | 332
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py | 332
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs.py | 752
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_facts.py | 398
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_info.py | 398
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache.py | 546
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_facts.py | 309
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_info.py | 309
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py | 343
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb.py | 665
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_facts.py | 286
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_info.py | 286
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb.py | 1354
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_facts.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_instance.py | 369
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_network_lb.py | 468
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target.py | 353
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_facts.py | 436
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group.py | 865
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_facts.py | 321
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py | 321
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_info.py | 436
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/execute_lambda.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam.py | 869
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert.py | 313
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert_facts.py | 163
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_group.py | 436
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py | 389
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_facts.py | 107
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py | 107
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_password_policy.py | 212
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy.py | 344
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy_info.py | 217
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role.py | 667
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_facts.py | 254
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_info.py | 254
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py | 247
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_facts.py | 163
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py | 163
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user.py | 369
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user_info.py | 179
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/kinesis_stream.py | 1408
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda.py | 604
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_alias.py | 378
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_event.py | 432
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_facts.py | 389
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_info.py | 376
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_policy.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/lightsail.py | 337
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds.py | 1396
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance.py | 1234
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_facts.py | 408
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_info.py | 408
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_param_group.py | 342
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot.py | 349
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_facts.py | 393
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_info.py | 393
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_subnet_group.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift.py | 685
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py | 202
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_facts.py | 351
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_info.py | 351
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py | 177
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53.py | 708
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_facts.py | 492
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_health_check.py | 369
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_info.py | 492
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_zone.py | 436
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py | 259
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py | 513
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_logging.py | 173
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py | 221
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_sync.py | 557
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_website.py | 322
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns.py | 230
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns_topic.py | 525
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/sqs_queue.py | 475
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_assume_role.py | 176
-rw-r--r--  collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_session_token.py | 147
216 files changed, 83208 insertions(+), 0 deletions(-)
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/connection/__init__.py b/collections-debian-merged/ansible_collections/community/aws/plugins/connection/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/connection/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/connection/aws_ssm.py b/collections-debian-merged/ansible_collections/community/aws/plugins/connection/aws_ssm.py
new file mode 100644
index 00000000..94289eee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/connection/aws_ssm.py
@@ -0,0 +1,592 @@
+# Based on the ssh connection plugin by Michael DeHaan
+#
+# Copyright: (c) 2018, Pat Sharkey <psharkey@cleo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+- Pat Sharkey (@psharkey) <psharkey@cleo.com>
+- HanumanthaRao MVL (@hanumantharaomvl) <hanumanth@flux7.com>
+- Gaurav Ashtikar (@gau1991) <gaurav.ashtikar@flux7.com>
+connection: aws_ssm
+short_description: execute via AWS Systems Manager
+description:
+- This connection plugin allows Ansible to execute tasks on an EC2 instance via the AWS SSM CLI.
+requirements:
+- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).
+- The control machine must have the AWS session-manager-plugin installed.
+- The remote EC2 Linux instance must have curl installed.
+options:
+ access_key_id:
+ description: The STS access key to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_access_key_id
+ version_added: 1.3.0
+ secret_access_key:
+ description: The STS secret key to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_secret_access_key
+ version_added: 1.3.0
+ session_token:
+ description: The STS session token to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_session_token
+ version_added: 1.3.0
+ instance_id:
+ description: The EC2 instance ID.
+ vars:
+ - name: ansible_aws_ssm_instance_id
+ region:
+    description: The region the EC2 instance is located in.
+ vars:
+ - name: ansible_aws_ssm_region
+ default: 'us-east-1'
+ bucket_name:
+ description: The name of the S3 bucket used for file transfers.
+ vars:
+ - name: ansible_aws_ssm_bucket_name
+ plugin:
+ description: This defines the location of the session-manager-plugin binary.
+ vars:
+ - name: ansible_aws_ssm_plugin
+ default: '/usr/local/bin/session-manager-plugin'
+ retries:
+ description: Number of attempts to connect.
+ default: 3
+ type: integer
+ vars:
+ - name: ansible_aws_ssm_retries
+ ssm_timeout:
+    description: Connection timeout in seconds.
+ default: 60
+ type: integer
+ vars:
+ - name: ansible_aws_ssm_timeout
+'''
+
+EXAMPLES = r'''
+
+# Stop Spooler Process on Windows Instances
+- name: Stop Spooler Service on Windows Instances
+  hosts: all
+  gather_facts: false
+  vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Stop spooler service
+ win_service:
+ name: spooler
+ state: stopped
+
+# Install a Nginx Package on Linux Instance
+- name: Install a Nginx Package
+  hosts: all
+  gather_facts: false
+  vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-west-2
+ tasks:
+ - name: Install a Nginx Package
+ yum:
+ name: nginx
+ state: present
+
+# Create a directory in Windows Instances
+- name: Create a directory in Windows Instance
+  hosts: all
+  gather_facts: false
+  vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Create a Directory
+ win_file:
+ path: C:\Windows\temp
+ state: directory
+
+# Making use of Dynamic Inventory Plugin
+# =======================================
+# aws_ec2.yml (Dynamic Inventory - Linux)
+# This will return the Instance IDs matching the filter
+#plugin: aws_ec2
+#regions:
+# - us-east-1
+#hostnames:
+# - instance-id
+#filters:
+# tag:SSMTag: ssmlinux
+# -----------------------
+- name: install aws-cli
+ hosts: all
+ gather_facts: false
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: aws-cli
+ raw: yum install -y awscli
+ tags: aws-cli
+# Execution: ansible-playbook linux.yaml -i aws_ec2.yml
+# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
+# =====================================================
+# aws_ec2.yml (Dynamic Inventory - Windows)
+#plugin: aws_ec2
+#regions:
+# - us-east-1
+#hostnames:
+# - instance-id
+#filters:
+# tag:SSMTag: ssmwindows
+# -----------------------
+- name: Create a dir.
+ hosts: all
+ gather_facts: false
+ vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Create the directory
+ win_file:
+ path: C:\Temp\SSM_Testing5
+ state: directory
+# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml
+# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
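+
+# Passing temporary STS credentials in as connection variables
+# (a sketch; the registered 'assumed_role' result and its fields are
+# placeholders for wherever your credentials actually come from)
+- name: Ping an instance using temporary credentials
+  hosts: all
+  gather_facts: false
+  vars:
+    ansible_connection: aws_ssm
+    ansible_aws_ssm_bucket_name: nameofthebucket
+    ansible_aws_ssm_region: us-east-1
+    ansible_aws_ssm_access_key_id: "{{ assumed_role.sts_creds.access_key }}"
+    ansible_aws_ssm_secret_access_key: "{{ assumed_role.sts_creds.secret_key }}"
+    ansible_aws_ssm_session_token: "{{ assumed_role.sts_creds.session_token }}"
+  tasks:
+    - name: Ping the instance
+      ansible.builtin.ping: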
+'''
+
+import getpass
+import json
+import os
+import pty
+import random
+import re
+import select
+import string
+import subprocess
+import time
+
+try:
+ import boto3
+ HAS_BOTO_3 = True
+except ImportError as e:
+ HAS_BOTO_3_ERROR = str(e)
+ HAS_BOTO_3 = False
+
+from functools import wraps
+from ansible import constants as C
+from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _common_args
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _ssm_retry(func):
+ """
+ Decorator to retry in the case of a connection failure
+ Will retry if:
+ * an exception is caught
+ Will not retry if
+ * remaining_tries is <2
+ * retries limit reached
+ """
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ remaining_tries = int(self.get_option('retries')) + 1
+ cmd_summary = "%s..." % args[0]
+ for attempt in range(remaining_tries):
+ cmd = args[0]
+
+ try:
+ return_tuple = func(self, *args, **kwargs)
+ display.vvv(return_tuple, host=self.host)
+ break
+
+ except (AnsibleConnectionFailure, Exception) as e:
+ if attempt == remaining_tries - 1:
+ raise
+ else:
+ pause = 2 ** attempt - 1
+ if pause > 30:
+ pause = 30
+
+ if isinstance(e, AnsibleConnectionFailure):
+ msg = "ssm_retry: attempt: %d, cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
+ else:
+ msg = "ssm_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
+
+ display.vv(msg, host=self.host)
+
+ time.sleep(pause)
+
+ # Do not attempt to reuse the existing session on retries
+ self.close()
+
+ continue
+
+ return return_tuple
+ return wrapped
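+
+# Retry behaviour (informational): with the default retries=3 the wrapped call
+# is attempted up to four times; after a failed attempt the plugin sleeps for
+# 2**attempt - 1 seconds (0, 1, then 3; capped at 30) and closes the session so
+# the next attempt starts from a fresh connection.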
+
+
+def chunks(lst, n):
+ """Yield successive n-sized chunks from lst."""
+ for i in range(0, len(lst), n):
+ yield lst[i:i + n]
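+
+# For example, list(chunks("abcdef", 4)) yields ["abcd", "ef"]; exec_command()
+# uses this to feed the wrapped command to the session's stdin in 1024-byte
+# pieces.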
+
+
+class Connection(ConnectionBase):
+ ''' AWS SSM based connections '''
+
+ transport = 'community.aws.aws_ssm'
+ allow_executable = False
+ allow_extras = True
+ has_pipelining = False
+ is_windows = False
+ _client = None
+ _session = None
+ _stdout = None
+ _session_id = ''
+ _timeout = False
+ MARK_LENGTH = 26
+
+ def __init__(self, *args, **kwargs):
+ if not HAS_BOTO_3:
+ raise AnsibleError('{0}: {1}'.format(missing_required_lib("boto3"), HAS_BOTO_3_ERROR))
+
+ super(Connection, self).__init__(*args, **kwargs)
+ self.host = self._play_context.remote_addr
+
+ if getattr(self._shell, "SHELL_FAMILY", '') == 'powershell':
+ self.delegate = None
+ self.has_native_async = True
+ self.always_pipeline_modules = True
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+ self.protocol = None
+ self.shell_id = None
+ self._shell_type = 'powershell'
+ self.is_windows = True
+
+ def _connect(self):
+ ''' connect to the host via ssm '''
+
+ self._play_context.remote_user = getpass.getuser()
+
+ if not self._session_id:
+ self.start_session()
+ return self
+
+ def start_session(self):
+ ''' start ssm session '''
+
+ if self.get_option('instance_id') is None:
+ self.instance_id = self.host
+ else:
+ self.instance_id = self.get_option('instance_id')
+
+ display.vvv(u"ESTABLISH SSM CONNECTION TO: {0}".format(self.instance_id), host=self.host)
+
+ executable = self.get_option('plugin')
+ if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+            raise AnsibleError("failed to find the specified executable %s."
+                               " Please verify that the executable exists and retry." % executable)
+
+ profile_name = ''
+ region_name = self.get_option('region')
+ ssm_parameters = dict()
+ client = self._get_boto_client('ssm', region_name=region_name)
+ self._client = client
+ response = client.start_session(Target=self.instance_id, Parameters=ssm_parameters)
+ self._session_id = response['SessionId']
+
+ cmd = [
+ executable,
+ json.dumps(response),
+ region_name,
+ "StartSession",
+ profile_name,
+ json.dumps({"Target": self.instance_id}),
+ client.meta.endpoint_url
+ ]
+
+ display.vvvv(u"SSM COMMAND: {0}".format(to_text(cmd)), host=self.host)
+
+ stdout_r, stdout_w = pty.openpty()
+ session = subprocess.Popen(
+ cmd,
+ stdin=subprocess.PIPE,
+ stdout=stdout_w,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ bufsize=0,
+ )
+
+ os.close(stdout_w)
+ self._stdout = os.fdopen(stdout_r, 'rb', 0)
+ self._session = session
+ self._poll_stdout = select.poll()
+ self._poll_stdout.register(self._stdout, select.POLLIN)
+
+ # Disable command echo and prompt.
+ self._prepare_terminal()
+
+ display.vvv(u"SSM CONNECTION ID: {0}".format(self._session_id), host=self.host)
+
+ return session
+
+ @_ssm_retry
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the ssm host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self.host)
+
+ session = self._session
+
+ mark_begin = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
+ if self.is_windows:
+ mark_start = mark_begin + " $LASTEXITCODE"
+ else:
+ mark_start = mark_begin
+ mark_end = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
+
+        # Wrap the command in markers appropriate for the shell being used
+ cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)
+
+ self._flush_stderr(session)
+
+ for chunk in chunks(cmd, 1024):
+ session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))
+
+ # Read stdout between the markers
+ stdout = ''
+ win_line = ''
+ begin = False
+ stop_time = int(round(time.time())) + self.get_option('ssm_timeout')
+ while session.poll() is None:
+ remaining = stop_time - int(round(time.time()))
+ if remaining < 1:
+ self._timeout = True
+ display.vvvv(u"EXEC timeout stdout: {0}".format(to_text(stdout)), host=self.host)
+ raise AnsibleConnectionFailure("SSM exec_command timeout on host: %s"
+ % self.instance_id)
+ if self._poll_stdout.poll(1000):
+ line = self._filter_ansi(self._stdout.readline())
+ display.vvvv(u"EXEC stdout line: {0}".format(to_text(line)), host=self.host)
+ else:
+ display.vvvv(u"EXEC remaining: {0}".format(remaining), host=self.host)
+ continue
+
+ if not begin and self.is_windows:
+ win_line = win_line + line
+ line = win_line
+
+ if mark_start in line:
+ begin = True
+ if not line.startswith(mark_start):
+ stdout = ''
+ continue
+ if begin:
+ if mark_end in line:
+ display.vvvv(u"POST_PROCESS: {0}".format(to_text(stdout)), host=self.host)
+ returncode, stdout = self._post_process(stdout, mark_begin)
+ break
+ else:
+ stdout = stdout + line
+
+ stderr = self._flush_stderr(session)
+
+ return (returncode, stdout, stderr)
+
+ def _prepare_terminal(self):
+ ''' perform any one-time terminal settings '''
+
+ if not self.is_windows:
+ cmd = "stty -echo\n" + "PS1=''\n"
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+ self._session.stdin.write(cmd)
+
+ def _wrap_command(self, cmd, sudoable, mark_start, mark_end):
+ ''' wrap command so stdout and status can be extracted '''
+
+ if self.is_windows:
+ if not cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
+ cmd = self._shell._encode_script(cmd, preserve_rc=True)
+ cmd = cmd + "; echo " + mark_start + "\necho " + mark_end + "\n"
+ else:
+ if sudoable:
+ cmd = "sudo " + cmd
+ cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n"
+
+ display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host)
+ return cmd
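+
+    # For example (illustrative, not literal plugin output): on a non-Windows
+    # host with sudoable=True, a command such as "ls" becomes
+    #   echo <mark_start>
+    #   sudo ls
+    #   echo $'\n'$?
+    #   echo <mark_end>
+    # so exec_command() can locate the output between the markers and
+    # _post_process() can read the exit status printed just before the end
+    # marker.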
+
+ def _post_process(self, stdout, mark_begin):
+ ''' extract command status and strip unwanted lines '''
+
+ if self.is_windows:
+ # Value of $LASTEXITCODE will be the line after the mark
+ trailer = stdout[stdout.rfind(mark_begin):]
+ last_exit_code = trailer.splitlines()[1]
+            if last_exit_code.isdigit():
+ returncode = int(last_exit_code)
+ else:
+ returncode = -1
+ # output to keep will be before the mark
+ stdout = stdout[:stdout.rfind(mark_begin)]
+
+ # If it looks like JSON remove any newlines
+ if stdout.startswith('{'):
+ stdout = stdout.replace('\n', '')
+
+ return (returncode, stdout)
+ else:
+ # Get command return code
+ returncode = int(stdout.splitlines()[-2])
+
+ # Throw away ending lines
+ for x in range(0, 3):
+ stdout = stdout[:stdout.rfind('\n')]
+
+ return (returncode, stdout)
+
+ def _filter_ansi(self, line):
+ ''' remove any ANSI terminal control codes '''
+ line = to_text(line)
+
+ if self.is_windows:
+ osc_filter = re.compile(r'\x1b\][^\x07]*\x07')
+ line = osc_filter.sub('', line)
+ ansi_filter = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
+ line = ansi_filter.sub('', line)
+
+ # Replace or strip sequence (at terminal width)
+ line = line.replace('\r\r\n', '\n')
+ if len(line) == 201:
+ line = line[:-1]
+
+ return line
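+
+    # Illustrative examples of what is stripped on Windows hosts: OSC title
+    # sequences such as "\x1b]0;user@host\x07" and CSI colour codes such as
+    # "\x1b[01;34m"; "\r\r\n" line endings are normalised to "\n".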
+
+ def _flush_stderr(self, subprocess):
+ ''' read and return stderr with minimal blocking '''
+
+ poll_stderr = select.poll()
+ poll_stderr.register(subprocess.stderr, select.POLLIN)
+ stderr = ''
+
+ while subprocess.poll() is None:
+ if poll_stderr.poll(1):
+ line = subprocess.stderr.readline()
+ display.vvvv(u"stderr line: {0}".format(to_text(line)), host=self.host)
+ stderr = stderr + line
+ else:
+ break
+
+ return stderr
+
+ def _get_url(self, client_method, bucket_name, out_path, http_method):
+ ''' Generate URL for get_object / put_object '''
+ client = self._get_boto_client('s3')
+ return client.generate_presigned_url(client_method, Params={'Bucket': bucket_name, 'Key': out_path}, ExpiresIn=3600, HttpMethod=http_method)
+
+ def _get_boto_client(self, service, region_name=None):
+ ''' Gets a boto3 client based on the STS token '''
+
+ aws_access_key_id = self.get_option('access_key_id')
+ aws_secret_access_key = self.get_option('secret_access_key')
+ aws_session_token = self.get_option('session_token')
+ if aws_access_key_id is None or aws_secret_access_key is None or aws_session_token is None:
+ aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None)
+ aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None)
+ aws_session_token = os.environ.get("AWS_SESSION_TOKEN", None)
+ client = boto3.client(
+ service,
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ aws_session_token=aws_session_token,
+ region_name=region_name)
+ return client
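+
+    # Note: if any of the three credential options is unset, the plugin falls
+    # back to the AWS_* environment variables for all three; a partially
+    # specified set of options is not mixed with environment values.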
+
+ @_ssm_retry
+ def _file_transport_command(self, in_path, out_path, ssm_action):
+        ''' transfer a file to or from the remote host using an intermediate S3 bucket '''
+
+ path_unescaped = "{0}/{1}".format(self.instance_id, out_path)
+ s3_path = path_unescaped.replace('\\', '/')
+ bucket_url = 's3://%s/%s' % (self.get_option('bucket_name'), s3_path)
+
+ if self.is_windows:
+ put_command = "Invoke-WebRequest -Method PUT -InFile '%s' -Uri '%s' -UseBasicParsing" % (
+ in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT'))
+ get_command = "Invoke-WebRequest '%s' -OutFile '%s'" % (
+ self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET'), out_path)
+ else:
+ put_command = "curl --request PUT --upload-file '%s' '%s'" % (
+ in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT'))
+ get_command = "curl '%s' -o '%s'" % (
+ self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET'), out_path)
+
+ client = self._get_boto_client('s3')
+ if ssm_action == 'get':
+ (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False)
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data:
+ client.download_fileobj(self.get_option('bucket_name'), s3_path, data)
+ else:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data:
+ client.upload_fileobj(data, self.get_option('bucket_name'), s3_path)
+ (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False)
+
+ # Remove the files from the bucket after they've been transferred
+ client.delete_object(Bucket=self.get_option('bucket_name'), Key=s3_path)
+
+ # Check the return code
+ if returncode == 0:
+ return (returncode, stdout, stderr)
+ else:
+ raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
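+
+    # Flow summary (matches the logic above): for a 'put', the local file is
+    # uploaded to S3 with boto3 and the remote host then pulls it via a
+    # presigned GET URL; for a 'get', the remote host first pushes the file to
+    # S3 via a presigned PUT URL and it is then downloaded locally with boto3.
+    # The intermediate S3 object is deleted after either transfer.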
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
+
+ return self._file_transport_command(in_path, out_path, 'put')
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+ return self._file_transport_command(in_path, out_path, 'get')
+
+ def close(self):
+ ''' terminate the connection '''
+ if self._session_id:
+
+ display.vvv(u"CLOSING SSM CONNECTION TO: {0}".format(self.instance_id), host=self.host)
+ if self._timeout:
+ self._session.terminate()
+ else:
+ cmd = b"\nexit\n"
+ self._session.communicate(cmd)
+
+ display.vvvv(u"TERMINATE SSM SESSION: {0}".format(self._session_id), host=self.host)
+ self._client.terminate_session(SessionId=self._session_id)
+ self._session_id = ''
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm.py
new file mode 100644
index 00000000..25581db1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
+# on behalf of Telstra Corporation Limited
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_acm
+short_description: Upload and delete certificates in the AWS Certificate Manager service
+version_added: 1.0.0
+description:
+ - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM).
+ - >
+    This module does not currently interact with AWS-provided certificates.
+    It only manages certificates that the user has provided to AWS.
+ - The ACM API allows users to upload multiple certificates for the same domain name,
+ and even multiple identical certificates.
+ This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy.
+    It does this by applying AWS resource "Name" tags to ACM certificates.
+ - >
+ When I(state=present),
+ if there is one certificate in ACM
+ with a C(Name) tag equal to the C(name_tag) parameter,
+ and an identical body and chain,
+ this task will succeed without effect.
+ - >
+ When I(state=present),
+ if there is one certificate in ACM
+    with a I(Name) tag equal to the I(name_tag) parameter,
+ and a different body,
+ this task will overwrite that certificate.
+ - >
+ When I(state=present),
+ if there are multiple certificates in ACM
+ with a I(Name) tag equal to the I(name_tag) parameter,
+ this task will fail.
+ - >
+ When I(state=absent) and I(certificate_arn) is defined,
+ this module will delete the ACM resource with that ARN if it exists in this region,
+ and succeed without effect if it doesn't exist.
+ - >
+ When I(state=absent) and I(domain_name) is defined,
+ this module will delete all ACM resources in this AWS region with a corresponding domain name.
+ If there are none, it will succeed without effect.
+ - >
+ When I(state=absent) and I(certificate_arn) is not defined,
+ and I(domain_name) is not defined,
+ this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag.
+ If there are none, it will succeed without effect.
+ - Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API.
+options:
+ certificate:
+ description:
+ - The body of the PEM encoded public certificate.
+ - Required when I(state) is not C(absent).
+ - If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')).
+ type: str
+
+ certificate_arn:
+ description:
+      - The ARN of a certificate in ACM to delete.
+ - Ignored when I(state=present).
+      - If I(state=absent), you must provide exactly one of I(certificate_arn), I(domain_name) or I(name_tag).
+ - >
+ If I(state=absent) and no resource exists with this ARN in this region,
+ the task will succeed with no effect.
+ - >
+ If I(state=absent) and the corresponding resource exists in a different region,
+ this task may report success without deleting that resource.
+ type: str
+ aliases: [arn]
+
+ certificate_chain:
+ description:
+ - The body of the PEM encoded chain for your certificate.
+ - If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')).
+      - Ignored when I(state=absent).
+ type: str
+
+ domain_name:
+ description:
+ - The domain name of the certificate.
+ - >
+ If I(state=absent) and I(domain_name) is specified,
+ this task will delete all ACM certificates with this domain.
+ - Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided.
+ - >
+ If I(state=present) this must not be specified.
+ (Since the domain name is encoded within the public certificate's body.)
+ type: str
+ aliases: [domain]
+
+ name_tag:
+ description:
+ - The unique identifier for tagging resources using AWS tags, with key I(Name).
+ - This can be any set of characters accepted by AWS for tag values.
+ - >
+ This is to ensure Ansible can treat certificates idempotently,
+ even though the ACM API allows duplicate certificates.
+      - If I(state=present), this must be specified.
+ - >
+ If I(state=absent), you must provide exactly one of
+ I(certificate_arn), I(domain_name) or I(name_tag).
+ type: str
+ aliases: [name]
+
+ private_key:
+ description:
+ - The body of the PEM encoded private key.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ - If your private key is in a file, use C(lookup('file', 'path/to/key.pem')).
+ type: str
+
+ state:
+ description:
+ - >
+ If I(state=present), the specified public certificate and private key
+ will be uploaded, with I(Name) tag equal to I(name_tag).
+ - >
+ If I(state=absent), any certificates in this region
+ with a corresponding I(domain_name), I(name_tag) or I(certificate_arn)
+ will be deleted.
+ choices: [present, absent]
+ default: present
+ type: str
+requirements:
+ - boto3
+author:
+ - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+
+- name: upload a self-signed certificate
+ community.aws.aws_acm:
+ certificate: "{{ lookup('file', 'cert.pem' ) }}"
+    private_key: "{{ lookup('file', 'key.pem' ) }}"
+ name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert"
+ region: ap-southeast-2 # AWS region
+
+- name: create/update a certificate with a chain
+ community.aws.aws_acm:
+ certificate: "{{ lookup('file', 'cert.pem' ) }}"
+    private_key: "{{ lookup('file', 'key.pem' ) }}"
+ name_tag: my_cert
+ certificate_chain: "{{ lookup('file', 'chain.pem' ) }}"
+ state: present
+ region: ap-southeast-2
+ register: cert_create
+
+- name: print ARN of cert we just created
+ ansible.builtin.debug:
+ var: cert_create.certificate.arn
+
+- name: delete the cert we just created
+ community.aws.aws_acm:
+ name_tag: my_cert
+ state: absent
+ region: ap-southeast-2
+
+- name: delete a certificate with a particular ARN
+ community.aws.aws_acm:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+ state: absent
+ region: ap-southeast-2
+
+- name: delete all certificates with a particular domain name
+ community.aws.aws_acm:
+ domain_name: acm.ansible.com
+ state: absent
+ region: ap-southeast-2
+
+'''
+
+RETURN = '''
+certificate:
+ description: Information about the certificate which was uploaded
+ type: complex
+ returned: when I(state=present)
+ contains:
+ arn:
+ description: The ARN of the certificate in ACM
+ type: str
+ returned: when I(state=present)
+ sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+ domain_name:
+ description: The domain name encoded within the public certificate
+ type: str
+ returned: when I(state=present)
+ sample: acm.ansible.com
+arns:
+ description: A list of the ARNs of the certificates in ACM which were deleted
+ type: list
+ elements: str
+ returned: when I(state=absent)
+ sample:
+ - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+from ansible.module_utils._text import to_text
+import base64
+import re # regex library
+
+
+# Takes in two text arguments
+# Each a PEM encoded certificate
+# Or a chain of PEM encoded certificates
+# May include some lines between each certificate in the chain, e.g. "Subject: ..."
+# Returns True iff the chains/certs are functionally identical (including chain order)
+def chain_compare(module, a, b):
+
+ chain_a_pem = pem_chain_split(module, a)
+ chain_b_pem = pem_chain_split(module, b)
+
+ if len(chain_a_pem) != len(chain_b_pem):
+ return False
+
+ # Chain length is the same
+ for (ca, cb) in zip(chain_a_pem, chain_b_pem):
+ der_a = PEM_body_to_DER(module, ca)
+ der_b = PEM_body_to_DER(module, cb)
+ if der_a != der_b:
+ return False
+
+ return True
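+
+# Note: comparison happens on the decoded DER bytes rather than on the PEM
+# text, so differences in whitespace, line wrapping or interleaved
+# "Subject: ..." annotations do not cause a false mismatch.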
+
+
+# Takes in PEM encoded data with no headers
+# returns the equivalent DER as a byte array
+def PEM_body_to_DER(module, pem):
+ try:
+ der = base64.b64decode(to_text(pem))
+ except (ValueError, TypeError) as e:
+ module.fail_json_aws(e, msg="Unable to decode certificate chain")
+ return der
+
+
+# Store this globally to avoid repeated recompilation
+pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")
+
+
+# Use regex to split up a chain or single cert into an array of base64 encoded data
+# Using "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----"
+# Noting that some chains have non-pem data in between each cert
+# This function returns only what's between the headers, excluding the headers
+def pem_chain_split(module, pem):
+
+ pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
+
+ if len(pem_arr) == 0:
+ # This happens if the regex doesn't match at all
+ module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
+
+ return pem_arr
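+
+# Illustrative example (hypothetical input): a chain file holding two
+# concatenated certificates, possibly with "Subject: ..." text between them,
+# splits into a list of two base64 payload strings with the BEGIN/END lines
+# stripped; each is then decoded by PEM_body_to_DER() for comparison.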
+
+
+def main():
+ argument_spec = dict(
+ certificate=dict(),
+ certificate_arn=dict(aliases=['arn']),
+ certificate_chain=dict(),
+ domain_name=dict(aliases=['domain']),
+ name_tag=dict(aliases=['name']),
+ private_key=dict(no_log=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ required_if = [
+ ['state', 'present', ['certificate', 'name_tag', 'private_key']],
+ ]
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+ acm = ACMServiceManager(module)
+
+ # Check argument requirements
+ if module.params['state'] == 'present':
+ if module.params['certificate_arn']:
+ module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'")
+ else: # absent
+ # exactly one of these should be specified
+ absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+ if sum([(module.params[a] is not None) for a in absent_args]) != 1:
+ for a in absent_args:
+ module.debug("%s is %s" % (a, module.params[a]))
+            module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+
+ if module.params['name_tag']:
+ tags = dict(Name=module.params['name_tag'])
+ else:
+ tags = None
+
+ client = module.client('acm')
+
+ # fetch the list of certificates currently in ACM
+ certificates = acm.get_certificates(client=client,
+ module=module,
+ domain_name=module.params['domain_name'],
+ arn=module.params['certificate_arn'],
+ only_tags=tags)
+
+ module.debug("Found %d corresponding certificates in ACM" % len(certificates))
+
+ if module.params['state'] == 'present':
+ if len(certificates) > 1:
+ msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
+ module.fail_json(msg=msg, certificates=certificates)
+ elif len(certificates) == 1:
+ # update the existing certificate
+ module.debug("Existing certificate found in ACM")
+ old_cert = certificates[0] # existing cert in ACM
+ if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) or (old_cert['tags']['Name'] != module.params['name_tag']):
+ # shouldn't happen
+ module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
+
+ if 'certificate' not in old_cert:
+ # shouldn't happen
+ module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)
+
+ # Are the existing certificate in ACM and the local certificate the same?
+ same = True
+ same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
+ if module.params['certificate_chain']:
+ # Need to test this
+ # not sure if Amazon appends the cert itself to the chain when self-signed
+ same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
+ else:
+ # When there is no chain with a cert
+ # it seems Amazon returns the cert itself as the chain
+ same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])
+
+ if same:
+ module.debug("Existing certificate in ACM is the same, doing nothing")
+ domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn'])
+ module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']), changed=False)
+ else:
+ module.debug("Existing certificate in ACM is different, overwriting")
+
+ # update cert in ACM
+ arn = acm.import_certificate(client, module,
+ certificate=module.params['certificate'],
+ private_key=module.params['private_key'],
+ certificate_chain=module.params['certificate_chain'],
+ arn=old_cert['certificate_arn'],
+ tags=tags)
+ domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
+ module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
+ else: # len(certificates) == 0
+ module.debug("No certificate in ACM. Creating new one.")
+ arn = acm.import_certificate(client=client,
+ module=module,
+ certificate=module.params['certificate'],
+ private_key=module.params['private_key'],
+ certificate_chain=module.params['certificate_chain'],
+ tags=tags)
+ domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
+
+ module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
+
+ else: # state == absent
+ for cert in certificates:
+ acm.delete_certificate(client, module, cert['certificate_arn'])
+ module.exit_json(arns=[cert['certificate_arn'] for cert in certificates],
+ changed=(len(certificates) > 0))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_facts.py
new file mode 100644
index 00000000..97d9a879
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_facts.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: aws_acm_info
+short_description: Retrieve certificate information from AWS Certificate Manager service
+version_added: 1.0.0
+description:
+  - Retrieve information for ACM certificates.
+ - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
+ - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
+options:
+ certificate_arn:
+ description:
+ - If provided, the results will be filtered to show only the certificate with this ARN.
+ - If no certificate with this ARN exists, this task will fail.
+      - If a certificate with this ARN exists in a different region, this task will fail.
+ aliases:
+ - arn
+ type: str
+ domain_name:
+ description:
+      - The domain name of an ACM certificate to limit the search to.
+ aliases:
+ - name
+ type: str
+ statuses:
+ description:
+      - Status to filter the certificate results.
+ choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ type: list
+ elements: str
+ tags:
+ description:
+ - Filter results to show only certificates with tags that match all the tags specified here.
+ type: dict
+requirements:
+ - boto3
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: obtain all ACM certificates
+ community.aws.aws_acm_info:
+
+- name: obtain all information for a single ACM certificate
+ community.aws.aws_acm_info:
+ domain_name: "*.example_com"
+
+- name: obtain all certificates pending validation
+ community.aws.aws_acm_info:
+ statuses:
+ - PENDING_VALIDATION
+
+- name: obtain all certificates with tag Name=foo and myTag=bar
+ community.aws.aws_acm_info:
+ tags:
+ Name: foo
+ myTag: bar
+
+
+# The output is still a list of certificates, just one item long.
+- name: obtain information about a certificate with a particular ARN
+ community.aws.aws_acm_info:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+
+'''
+
+RETURN = r'''
+certificates:
+ description: A list of certificates
+ returned: always
+ type: complex
+ contains:
+ certificate:
+ description: The ACM Certificate body
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
+ type: str
+ certificate_arn:
+ description: Certificate ARN
+ returned: always
+ sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
+ type: str
+ certificate_chain:
+ description: Full certificate chain for the certificate
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
+ type: str
+ created_at:
+ description: Date certificate was created
+ returned: always
+ sample: '2017-08-15T10:31:19+10:00'
+ type: str
+ domain_name:
+ description: Domain name for the certificate
+ returned: always
+ sample: '*.example.com'
+ type: str
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ failure_reason:
+ description: Reason certificate request failed
+ returned: only when certificate issuing failed
+ type: str
+ sample: NO_AVAILABLE_CONTACTS
+ in_use_by:
+ description: A list of ARNs for the AWS resources that are using the certificate.
+ returned: always
+ sample: []
+ type: list
+ elements: str
+ issued_at:
+ description: Date certificate was issued
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ issuer:
+ description: Issuer of the certificate
+ returned: always
+ sample: Amazon
+ type: str
+ key_algorithm:
+ description: Algorithm used to generate the certificate
+ returned: always
+ sample: RSA-2048
+ type: str
+ not_after:
+ description: Date after which the certificate is not valid
+ returned: always
+ sample: '2019-01-01T00:00:00+10:00'
+ type: str
+ not_before:
+ description: Date before which the certificate is not valid
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ renewal_summary:
+ description: Information about managed renewal process
+ returned: when certificate is issued by Amazon and a renewal has been started
+ type: complex
+ contains:
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ renewal_status:
+ description: Status of the domain renewal
+ returned: always
+ sample: PENDING_AUTO_RENEWAL
+ type: str
+ revocation_reason:
+ description: Reason for certificate revocation
+ returned: when the certificate has been revoked
+ sample: SUPERCEDED
+ type: str
+ revoked_at:
+ description: Date certificate was revoked
+ returned: when the certificate has been revoked
+ sample: '2017-09-01T10:00:00+10:00'
+ type: str
+ serial:
+ description: The serial number of the certificate
+ returned: always
+ sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+ type: str
+ signature_algorithm:
+ description: Algorithm used to sign the certificate
+ returned: always
+ sample: SHA256WITHRSA
+ type: str
+ status:
+ description: Status of the certificate in ACM
+ returned: always
+ sample: ISSUED
+ type: str
+ subject:
+ description: The name of the entity that is associated with the public key contained in the certificate
+ returned: always
+ sample: CN=*.example.com
+ type: str
+ subject_alternative_names:
+ description: Subject Alternative Names for the certificate
+ returned: always
+ sample:
+ - '*.example.com'
+ type: list
+ elements: str
+ tags:
+ description: Tags associated with the certificate
+ returned: always
+ type: dict
+ sample:
+ Application: helloworld
+ Environment: test
+ type:
+ description: The source of the certificate
+ returned: always
+ sample: AMAZON_ISSUED
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+
+
+def main():
+ argument_spec = dict(
+ certificate_arn=dict(aliases=['arn']),
+ domain_name=dict(aliases=['name']),
+ statuses=dict(
+ type='list',
+ elements='str',
+ choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ ),
+ tags=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ acm_info = ACMServiceManager(module)
+
+ if module._name == 'aws_acm_facts':
+ module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('acm')
+
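+    # Note that the tags filter is a subset match: a certificate is returned
+    # only if it carries every key/value pair supplied in the tags parameter.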
+ certificates = acm_info.get_certificates(client, module,
+ domain_name=module.params['domain_name'],
+ statuses=module.params['statuses'],
+ arn=module.params['certificate_arn'],
+ only_tags=module.params['tags'])
+
+ if module.params['certificate_arn'] and len(certificates) != 1:
+ module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
+
+ module.exit_json(certificates=certificates)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_info.py
new file mode 100644
index 00000000..97d9a879
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_acm_info.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: aws_acm_info
+short_description: Retrieve certificate information from AWS Certificate Manager service
+version_added: 1.0.0
+description:
+  - Retrieve information for ACM certificates.
+ - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
+ - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
+options:
+ certificate_arn:
+ description:
+ - If provided, the results will be filtered to show only the certificate with this ARN.
+ - If no certificate with this ARN exists, this task will fail.
+      - If a certificate with this ARN exists in a different region, this task will fail.
+ aliases:
+ - arn
+ type: str
+ domain_name:
+ description:
+      - The domain name of an ACM certificate to limit the search to.
+ aliases:
+ - name
+ type: str
+ statuses:
+ description:
+      - Status to filter the certificate results.
+ choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ type: list
+ elements: str
+ tags:
+ description:
+ - Filter results to show only certificates with tags that match all the tags specified here.
+ type: dict
+requirements:
+ - boto3
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: obtain all ACM certificates
+ community.aws.aws_acm_info:
+
+- name: obtain all information for a single ACM certificate
+ community.aws.aws_acm_info:
+ domain_name: "*.example_com"
+
+- name: obtain all certificates pending validation
+ community.aws.aws_acm_info:
+ statuses:
+ - PENDING_VALIDATION
+
+- name: obtain all certificates with tag Name=foo and myTag=bar
+ community.aws.aws_acm_info:
+ tags:
+ Name: foo
+ myTag: bar
+
+
+# The output is still a list of certificates, just one item long.
+- name: obtain information about a certificate with a particular ARN
+ community.aws.aws_acm_info:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+
+'''
+
+RETURN = r'''
+certificates:
+ description: A list of certificates
+ returned: always
+ type: complex
+ contains:
+ certificate:
+ description: The ACM Certificate body
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
+ type: str
+ certificate_arn:
+ description: Certificate ARN
+ returned: always
+ sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
+ type: str
+ certificate_chain:
+ description: Full certificate chain for the certificate
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
+ type: str
+ created_at:
+ description: Date certificate was created
+ returned: always
+ sample: '2017-08-15T10:31:19+10:00'
+ type: str
+ domain_name:
+ description: Domain name for the certificate
+ returned: always
+ sample: '*.example.com'
+ type: str
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ failure_reason:
+ description: Reason certificate request failed
+ returned: only when certificate issuing failed
+ type: str
+ sample: NO_AVAILABLE_CONTACTS
+ in_use_by:
+ description: A list of ARNs for the AWS resources that are using the certificate.
+ returned: always
+ sample: []
+ type: list
+ elements: str
+ issued_at:
+ description: Date certificate was issued
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ issuer:
+ description: Issuer of the certificate
+ returned: always
+ sample: Amazon
+ type: str
+ key_algorithm:
+ description: Algorithm used to generate the certificate
+ returned: always
+ sample: RSA-2048
+ type: str
+ not_after:
+ description: Date after which the certificate is not valid
+ returned: always
+ sample: '2019-01-01T00:00:00+10:00'
+ type: str
+ not_before:
+ description: Date before which the certificate is not valid
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ renewal_summary:
+ description: Information about managed renewal process
+ returned: when certificate is issued by Amazon and a renewal has been started
+ type: complex
+ contains:
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ renewal_status:
+ description: Status of the domain renewal
+ returned: always
+ sample: PENDING_AUTO_RENEWAL
+ type: str
+ revocation_reason:
+ description: Reason for certificate revocation
+ returned: when the certificate has been revoked
+ sample: SUPERCEDED
+ type: str
+ revoked_at:
+ description: Date certificate was revoked
+ returned: when the certificate has been revoked
+ sample: '2017-09-01T10:00:00+10:00'
+ type: str
+ serial:
+ description: The serial number of the certificate
+ returned: always
+ sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+ type: str
+ signature_algorithm:
+ description: Algorithm used to sign the certificate
+ returned: always
+ sample: SHA256WITHRSA
+ type: str
+ status:
+ description: Status of the certificate in ACM
+ returned: always
+ sample: ISSUED
+ type: str
+ subject:
+ description: The name of the entity that is associated with the public key contained in the certificate
+ returned: always
+ sample: CN=*.example.com
+ type: str
+ subject_alternative_names:
+ description: Subject Alternative Names for the certificate
+ returned: always
+ sample:
+ - '*.example.com'
+ type: list
+ elements: str
+ tags:
+ description: Tags associated with the certificate
+ returned: always
+ type: dict
+ sample:
+ Application: helloworld
+ Environment: test
+ type:
+ description: The source of the certificate
+ returned: always
+ sample: AMAZON_ISSUED
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+
+
+def main():
+ argument_spec = dict(
+ certificate_arn=dict(aliases=['arn']),
+ domain_name=dict(aliases=['name']),
+ statuses=dict(
+ type='list',
+ elements='str',
+ choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ ),
+ tags=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ acm_info = ACMServiceManager(module)
+
+ if module._name == 'aws_acm_facts':
+ module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('acm')
+
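+    # Note that the tags filter is a subset match: a certificate is returned
+    # only if it carries every key/value pair supplied in the tags parameter.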
+ certificates = acm_info.get_certificates(client, module,
+ domain_name=module.params['domain_name'],
+ statuses=module.params['statuses'],
+ arn=module.params['certificate_arn'],
+ only_tags=module.params['tags'])
+
+ if module.params['certificate_arn'] and len(certificates) != 1:
+ module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
+
+ module.exit_json(certificates=certificates)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_api_gateway.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_api_gateway.py
new file mode 100644
index 00000000..f7466b1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_api_gateway.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_api_gateway
+version_added: 1.0.0
+short_description: Manage AWS API Gateway APIs
+description:
+ - Allows for the management of API Gateway APIs.
+  - Normally you should give the I(api_id) since there is no other
+    stable, guaranteed-unique identifier for the API. If you do
+    not give I(api_id) then a new API will be created each time
+    this is run.
+  - Beware that there are very hard limits on the rate at which
+    you can call API Gateway's REST API. You may need to patch
+    your boto3. See U(https://github.com/boto/boto3/issues/876)
+    and discuss it with your AWS rep.
+  - I(swagger_file) and I(swagger_text) are passed on to AWS
+    transparently, whilst I(swagger_dict) is an Ansible dict which is
+    converted to JSON before the API definitions are uploaded.
+requirements: [ boto3 ]
+options:
+ api_id:
+ description:
+ - The ID of the API you want to manage.
+ type: str
+ state:
+ description: Create or delete API Gateway.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ swagger_file:
+ description:
+ - JSON or YAML file containing swagger definitions for API.
+ Exactly one of I(swagger_file), I(swagger_text) or I(swagger_dict) must
+ be present.
+ type: path
+ aliases: ['src', 'api_file']
+ swagger_text:
+ description:
+      - Swagger definitions for the API in JSON or YAML as a string, direct
+        from the playbook.
+ type: str
+ swagger_dict:
+ description:
+      - Swagger definitions for the API as an Ansible dictionary, which will be
+        converted to JSON and uploaded.
+ type: json
+ stage:
+ description:
+ - The name of the stage the API should be deployed to.
+ type: str
+ deploy_desc:
+ description:
+ - Description of the deployment.
+ - Recorded and visible in the AWS console.
+ default: Automatic deployment by Ansible.
+ type: str
+ cache_enabled:
+ description:
+ - Enable API GW caching of backend responses.
+ type: bool
+ default: false
+ cache_size:
+ description:
+      - Size in GB of the API GW cache; takes effect when I(cache_enabled) is C(true).
+ choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']
+ type: str
+ default: '0.5'
+ stage_variables:
+ description:
+      - Environment variables for the stage, defined as a dict of key/value pairs.
+ type: dict
+ stage_canary_settings:
+ description:
+ - Canary settings for the deployment of the stage.
+ - 'Dict with following settings:'
+ - 'C(percentTraffic): The percent (0-100) of traffic diverted to a canary deployment.'
+ - 'C(deploymentId): The ID of the canary deployment.'
+ - 'C(stageVariableOverrides): Stage variables overridden for a canary release deployment.'
+ - 'C(useStageCache): A Boolean flag to indicate whether the canary deployment uses the stage cache or not.'
+ - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage)
+ type: dict
+ tracing_enabled:
+ description:
+ - Specifies whether active tracing with X-ray is enabled for the API GW stage.
+ type: bool
+ default: false
+ endpoint_type:
+ description:
+ - Type of endpoint configuration.
+      - Use C(EDGE) for an edge-optimized API endpoint,
+        C(REGIONAL) for a regional deployment, or C(PRIVATE) for a private API.
+ - This flag will only be used when creating a new API Gateway setup, not for updates.
+ choices: ['EDGE', 'REGIONAL', 'PRIVATE']
+ type: str
+ default: EDGE
+author:
+ - 'Michael De La Rue (@mikedlr)'
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - A future version of this module will probably use tags or another
+ ID so that an API can be created only once.
+  - As an early workaround, an intermediate version will probably do
+    the same using a tag embedded in the API name.
+
+'''
+
+EXAMPLES = '''
+- name: Setup AWS API Gateway setup on AWS and deploy API definition
+ community.aws.aws_api_gateway:
+ swagger_file: my_api.yml
+ stage: production
+ cache_enabled: true
+ cache_size: '1.6'
+ tracing_enabled: true
+ endpoint_type: EDGE
+ state: present
+
+- name: Update API definition to deploy new version
+ community.aws.aws_api_gateway:
+ api_id: 'abc123321cba'
+ swagger_file: my_api.yml
+ deploy_desc: Make auth fix available.
+ cache_enabled: true
+ cache_size: '1.6'
+ endpoint_type: EDGE
+ state: present
+
+- name: Update API definitions and settings and deploy as canary
+ community.aws.aws_api_gateway:
+ api_id: 'abc123321cba'
+ swagger_file: my_api.yml
+ cache_enabled: true
+ cache_size: '6.1'
+    stage_canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
+ state: present
+'''
+
+RETURN = '''
+api_id:
+  description: API ID of the API endpoint created
+ returned: success
+ type: str
+ sample: '0ln4zq7p86'
+configure_response:
+ description: AWS response from the API configure call
+ returned: success
+ type: dict
+ sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" }
+deploy_response:
+ description: AWS response from the API deploy call
+ returned: success
+ type: dict
+ sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." }
+resource_actions:
+ description: Actions performed against AWS API
+ returned: always
+ type: list
+ sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def main():
+ argument_spec = dict(
+ api_id=dict(type='str', required=False),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
+ swagger_dict=dict(type='json', default=None),
+ swagger_text=dict(type='str', default=None),
+ stage=dict(type='str', default=None),
+ deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
+ cache_enabled=dict(type='bool', default=False),
+ cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
+ stage_variables=dict(type='dict', default={}),
+ stage_canary_settings=dict(type='dict', default={}),
+ tracing_enabled=dict(type='bool', default=False),
+ endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
+ )
+
+    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ )
+
+ api_id = module.params.get('api_id')
+    state = module.params.get('state')
+ swagger_file = module.params.get('swagger_file')
+ swagger_dict = module.params.get('swagger_dict')
+ swagger_text = module.params.get('swagger_text')
+ endpoint_type = module.params.get('endpoint_type')
+
+ client = module.client('apigateway')
+
+    changed = True  # for now this is always True; the module cannot yet detect a no-op run
+ conf_res = None
+ dep_res = None
+ del_res = None
+
+ if state == "present":
+ if api_id is None:
+ api_id = create_empty_api(module, client, endpoint_type)
+ api_data = get_api_definitions(module, swagger_file=swagger_file,
+ swagger_dict=swagger_dict, swagger_text=swagger_text)
+ conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
+ if state == "absent":
+ del_res = delete_rest_api(module, client, api_id)
+
+ exit_args = {"changed": changed, "api_id": api_id}
+
+ if conf_res is not None:
+ exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
+ if dep_res is not None:
+ exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
+ if del_res is not None:
+ exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
+
+ module.exit_json(**exit_args)
+
+
+def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
+ apidata = None
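+    # main() declares swagger_file, swagger_dict and swagger_text as mutually
+    # exclusive, so at most one of the branches below sets apidata; if none
+    # was supplied at all, we fail at the end of this function.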
+ if swagger_file is not None:
+ try:
+ with open(swagger_file) as f:
+ apidata = f.read()
+ except OSError as e:
+ msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e))
+ module.fail_json(msg=msg, exception=traceback.format_exc())
+ if swagger_dict is not None:
+ apidata = json.dumps(swagger_dict)
+ if swagger_text is not None:
+ apidata = swagger_text
+
+ if apidata is None:
+ module.fail_json(msg='module error - no swagger info provided')
+ return apidata
+
+
+def create_empty_api(module, client, endpoint_type):
+ """
+ creates a new empty API ready to be configured. The description is
+ temporarily set to show the API as incomplete but should be
+ updated when the API is configured.
+ """
+ desc = "Incomplete API creation by ansible aws_api_gateway module"
+ try:
+ awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="creating API")
+ return awsret["id"]
+
+
+def delete_rest_api(module, client, api_id):
+ """
+ Deletes entire REST API setup
+ """
+ try:
+ delete_response = delete_api(client, api_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
+ return delete_response
+
+
+def ensure_api_in_correct_state(module, client, api_id, api_data):
+ """Make sure that we have the API configured and deployed as instructed.
+
+    This function first configures the API by uploading the swagger
+    definitions, and then deploys them. Configuration and deployment are
+    kept closely together because there is only one set of definitions:
+    if we paused between the two steps, someone else could update the
+    definitions and we would deploy the wrong configuration.
+ """
+
+ configure_response = None
+ try:
+ configure_response = configure_api(client, api_id, api_data=api_data)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="configuring API {0}".format(api_id))
+
+ deploy_response = None
+
+ stage = module.params.get('stage')
+ if stage:
+ try:
+ deploy_response = create_deployment(client, api_id, **module.params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ msg = "deploying api {0} to stage {1}".format(api_id, stage)
+ module.fail_json_aws(e, msg)
+
+ return configure_response, deploy_response
+
+
+retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']}
+
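+# Every thin wrapper below is decorated with AWSRetry.jittered_backoff(**retry_params):
+# a throttled call such as client.create_rest_api() is retried up to 10 times with
+# jittered exponential backoff starting from a 10 second base delay, and API Gateway's
+# TooManyRequestsException is treated as retryable alongside the default throttling codes.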
+
+@AWSRetry.jittered_backoff(**retry_params)
+def create_api(client, name=None, description=None, endpoint_type=None):
+ return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]})
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def delete_api(client, api_id):
+ return client.delete_rest_api(restApiId=api_id)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def configure_api(client, api_id, api_data=None, mode="overwrite"):
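+    # mode="overwrite" replaces the entire API definition with the uploaded
+    # body; the module never calls this with mode="merge".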
+ return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def create_deployment(client, rest_api_id, **params):
+    # Build the arguments common to both cases once; canarySettings is only
+    # included when canary settings were actually supplied.
+    deploy_args = dict(
+        restApiId=rest_api_id,
+        stageName=params.get('stage'),
+        description=params.get('deploy_desc'),
+        cacheClusterEnabled=params.get('cache_enabled'),
+        cacheClusterSize=params.get('cache_size'),
+        variables=params.get('stage_variables'),
+        tracingEnabled=params.get('tracing_enabled'),
+    )
+
+    canary_settings = params.get('stage_canary_settings')
+    if canary_settings:
+        deploy_args['canarySettings'] = canary_settings
+
+    return client.create_deployment(**deploy_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_application_scaling_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_application_scaling_policy.py
new file mode 100644
index 00000000..dcc8b8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_application_scaling_policy.py
@@ -0,0 +1,538 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_application_scaling_policy
+version_added: 1.0.0
+short_description: Manage Application Auto Scaling Scaling Policies
+notes:
+  - For details of the parameters and return values, see
+ U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
+description:
+ - Creates, updates or removes a Scaling Policy.
+author:
+ - Gustavo Maia (@gurumaia)
+ - Chen Leibovich (@chenl87)
+requirements: [ json, botocore, boto3 ]
+options:
+ state:
+ description: Whether a policy should be C(present) or C(absent).
+ required: yes
+ choices: ['absent', 'present']
+ type: str
+ policy_name:
+ description: The name of the scaling policy.
+ required: yes
+ type: str
+ service_namespace:
+ description: The namespace of the AWS service.
+ required: yes
+ choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
+ type: str
+ resource_id:
+ description: The identifier of the resource associated with the scalable target.
+ required: yes
+ type: str
+ scalable_dimension:
+ description: The scalable dimension associated with the scalable target.
+ required: yes
+ choices: [ 'ecs:service:DesiredCount',
+ 'ec2:spot-fleet-request:TargetCapacity',
+ 'elasticmapreduce:instancegroup:InstanceCount',
+ 'appstream:fleet:DesiredCapacity',
+ 'dynamodb:table:ReadCapacityUnits',
+ 'dynamodb:table:WriteCapacityUnits',
+ 'dynamodb:index:ReadCapacityUnits',
+ 'dynamodb:index:WriteCapacityUnits']
+ type: str
+ policy_type:
+ description: The policy type.
+ required: yes
+ choices: ['StepScaling', 'TargetTrackingScaling']
+ type: str
+ step_scaling_policy_configuration:
+ description: A step scaling policy. This parameter is required if you are creating a policy and I(policy_type=StepScaling).
+ required: no
+ type: dict
+ target_tracking_scaling_policy_configuration:
+ description:
+ - A target tracking policy. This parameter is required if you are creating a new policy and I(policy_type=TargetTrackingScaling).
+ - 'Full documentation of the suboptions can be found in the API documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)'
+ required: no
+ type: dict
+ suboptions:
+ CustomizedMetricSpecification:
+ description: The metric to use if using a customized metric.
+ type: dict
+ DisableScaleIn:
+ description: Whether scaling-in should be disabled.
+ type: bool
+ PredefinedMetricSpecification:
+ description: The metric to use if using a predefined metric.
+ type: dict
+ ScaleInCooldown:
+ description: The time (in seconds) to wait after scaling-in before another scaling action can occur.
+ type: int
+ ScaleOutCooldown:
+ description: The time (in seconds) to wait after scaling-out before another scaling action can occur.
+ type: int
+ TargetValue:
+ description: The target value for the metric.
+ type: float
+ minimum_tasks:
+ description: The minimum value to scale to in response to a scale in event.
+      This parameter is required when creating the first policy for the specified service.
+ required: no
+ type: int
+ maximum_tasks:
+ description: The maximum value to scale to in response to a scale out event.
+      This parameter is required when creating the first policy for the specified service.
+ required: no
+ type: int
+ override_task_capacity:
+ description:
+      - Whether or not to override the values of minimum and/or maximum tasks if they are already set.
+ - Defaults to C(false).
+ required: no
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create step scaling policy for ECS Service
+- name: scaling_policy
+ community.aws.aws_application_scaling_policy:
+ state: present
+ policy_name: test_policy
+ service_namespace: ecs
+ resource_id: service/poc-pricing/test-as
+ scalable_dimension: ecs:service:DesiredCount
+ policy_type: StepScaling
+ minimum_tasks: 1
+ maximum_tasks: 6
+ step_scaling_policy_configuration:
+ AdjustmentType: ChangeInCapacity
+ StepAdjustments:
+ - MetricIntervalUpperBound: 123
+ ScalingAdjustment: 2
+ - MetricIntervalLowerBound: 123
+ ScalingAdjustment: -2
+ Cooldown: 123
+ MetricAggregationType: Average
+
+# Create target tracking scaling policy for ECS Service
+- name: scaling_policy
+ community.aws.aws_application_scaling_policy:
+ state: present
+ policy_name: test_policy
+ service_namespace: ecs
+ resource_id: service/poc-pricing/test-as
+ scalable_dimension: ecs:service:DesiredCount
+ policy_type: TargetTrackingScaling
+ minimum_tasks: 1
+ maximum_tasks: 6
+ target_tracking_scaling_policy_configuration:
+ TargetValue: 60
+ PredefinedMetricSpecification:
+ PredefinedMetricType: ECSServiceAverageCPUUtilization
+ ScaleOutCooldown: 60
+ ScaleInCooldown: 60
+
+# Remove scalable target for ECS Service
+- name: scaling_policy
+ community.aws.aws_application_scaling_policy:
+ state: absent
+ policy_name: test_policy
+ policy_type: StepScaling
+ service_namespace: ecs
+ resource_id: service/cluster-name/service-name
+ scalable_dimension: ecs:service:DesiredCount
+'''
+
+RETURN = '''
+alarms:
+ description: List of the CloudWatch alarms associated with the scaling policy
+ returned: when state present
+ type: complex
+ contains:
+ alarm_arn:
+ description: The Amazon Resource Name (ARN) of the alarm
+ returned: when state present
+ type: str
+ alarm_name:
+ description: The name of the alarm
+ returned: when state present
+ type: str
+service_namespace:
+ description: The namespace of the AWS service.
+ returned: when state present
+ type: str
+ sample: ecs
+resource_id:
+ description: The identifier of the resource associated with the scalable target.
+ returned: when state present
+ type: str
+ sample: service/cluster-name/service-name
+scalable_dimension:
+ description: The scalable dimension associated with the scalable target.
+ returned: when state present
+ type: str
+ sample: ecs:service:DesiredCount
+policy_arn:
+  description: The Amazon Resource Name (ARN) of the scaling policy.
+ returned: when state present
+ type: str
+policy_name:
+ description: The name of the scaling policy.
+ returned: when state present
+ type: str
+policy_type:
+ description: The policy type.
+ returned: when state present
+ type: str
+min_capacity:
+ description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
+ returned: when state present
+ type: int
+ sample: 1
+max_capacity:
+ description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present).
+ returned: when state present
+ type: int
+ sample: 2
+role_arn:
+ description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present).
+ returned: when state present
+ type: str
+ sample: arn:aws:iam::123456789123:role/roleName
+step_scaling_policy_configuration:
+ description: The step scaling policy.
+ returned: when state present and the policy type is StepScaling
+ type: complex
+ contains:
+ adjustment_type:
+ description: The adjustment type
+ returned: when state present and the policy type is StepScaling
+ type: str
+ sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity"
+ cooldown:
+ description: The amount of time, in seconds, after a scaling activity completes
+ where previous trigger-related scaling activities can influence future scaling events
+ returned: when state present and the policy type is StepScaling
+ type: int
+ sample: 60
+ metric_aggregation_type:
+ description: The aggregation type for the CloudWatch metrics
+ returned: when state present and the policy type is StepScaling
+ type: str
+ sample: "Average, Minimum, Maximum"
+ step_adjustments:
+ description: A set of adjustments that enable you to scale based on the size of the alarm breach
+ returned: when state present and the policy type is StepScaling
+ type: list
+ elements: dict
+target_tracking_scaling_policy_configuration:
+ description: The target tracking policy.
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: complex
+ contains:
+ predefined_metric_specification:
+ description: A predefined metric
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: complex
+ contains:
+ predefined_metric_type:
+ description: The metric type
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: str
+ sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization"
+ resource_label:
+ description: Identifies the resource associated with the metric type
+ returned: when metric type is ALBRequestCountPerTarget
+ type: str
+ scale_in_cooldown:
+ description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 60
+ scale_out_cooldown:
+ description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 60
+ target_value:
+ description: The target value for the metric
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 70
+creation_time:
+ description: The Unix timestamp for when the scalable target was created.
+ returned: when state present
+ type: str
+ sample: '2017-09-28T08:22:51.881000-03:00'
+''' # NOQA
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+# Merge the results of the scalable target creation and policy deletion/creation
+# There's no risk in overriding values since mutual keys have the same values in our case
+def merge_results(scalable_target_result, policy_result):
+    changed = scalable_target_result['changed'] or policy_result['changed']
+
+ merged_response = scalable_target_result['response'].copy()
+ merged_response.update(policy_result['response'])
+
+ return {"changed": changed, "response": merged_response}
+
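+# A worked example of the merge (illustrative shapes only):
+#   merge_results({"changed": False, "response": {"resource_id": "service/a/b"}},
+#                 {"changed": True, "response": {"policy_name": "p"}})
+#   -> {"changed": True, "response": {"resource_id": "service/a/b", "policy_name": "p"}}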
+
+def delete_scaling_policy(connection, module):
+ changed = False
+ try:
+ scaling_policy = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+ if scaling_policy['ScalingPolicies']:
+ try:
+ connection.delete_scaling_policy(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyName=module.params.get('policy_name'),
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete scaling policy")
+
+ return {"changed": changed}
+
+
+def create_scalable_target(connection, module):
+ changed = False
+
+ try:
+ scalable_targets = connection.describe_scalable_targets(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceIds=[
+ module.params.get('resource_id'),
+ ],
+ ScalableDimension=module.params.get('scalable_dimension')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scalable targets")
+
+ # Scalable target registration will occur if:
+ # 1. There is no scalable target registered for this service
+    # 2. A scalable target exists, different min/max values are defined, and override_task_capacity is true
+ if (
+ not scalable_targets['ScalableTargets']
+ or (
+ module.params.get('override_task_capacity')
+ and (
+ scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks')
+ or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks')
+ )
+ )
+ ):
+ changed = True
+ try:
+ connection.register_scalable_target(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ MinCapacity=module.params.get('minimum_tasks'),
+ MaxCapacity=module.params.get('maximum_tasks')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to register scalable target")
+
+ try:
+ response = connection.describe_scalable_targets(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceIds=[
+ module.params.get('resource_id'),
+ ],
+ ScalableDimension=module.params.get('scalable_dimension')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scalable targets")
+
+ if (response['ScalableTargets']):
+ snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
+ else:
+ snaked_response = {}
+
+ return {"changed": changed, "response": snaked_response}
+
+
+def create_scaling_policy(connection, module):
+ try:
+ scaling_policy = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+ changed = False
+
+ if scaling_policy['ScalingPolicies']:
+ scaling_policy = scaling_policy['ScalingPolicies'][0]
+ # check if the input parameters are equal to what's already configured
+ for attr in ('PolicyName',
+ 'ServiceNamespace',
+ 'ResourceId',
+ 'ScalableDimension',
+ 'PolicyType',
+ 'StepScalingPolicyConfiguration',
+ 'TargetTrackingScalingPolicyConfiguration'):
+ if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
+ changed = True
+ scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
+ else:
+ changed = True
+ scaling_policy = {
+ 'PolicyName': module.params.get('policy_name'),
+ 'ServiceNamespace': module.params.get('service_namespace'),
+ 'ResourceId': module.params.get('resource_id'),
+ 'ScalableDimension': module.params.get('scalable_dimension'),
+ 'PolicyType': module.params.get('policy_type'),
+ 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
+ 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
+ }
+
+ if changed:
+ try:
+ if (module.params.get('step_scaling_policy_configuration')):
+ connection.put_scaling_policy(
+ PolicyName=scaling_policy['PolicyName'],
+ ServiceNamespace=scaling_policy['ServiceNamespace'],
+ ResourceId=scaling_policy['ResourceId'],
+ ScalableDimension=scaling_policy['ScalableDimension'],
+ PolicyType=scaling_policy['PolicyType'],
+ StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
+ )
+ elif (module.params.get('target_tracking_scaling_policy_configuration')):
+ connection.put_scaling_policy(
+ PolicyName=scaling_policy['PolicyName'],
+ ServiceNamespace=scaling_policy['ServiceNamespace'],
+ ResourceId=scaling_policy['ResourceId'],
+ ScalableDimension=scaling_policy['ScalableDimension'],
+ PolicyType=scaling_policy['PolicyType'],
+ TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create scaling policy")
+
+ try:
+ response = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+ if (response['ScalingPolicies']):
+ snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
+ else:
+ snaked_response = {}
+
+ return {"changed": changed, "response": snaked_response}
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ policy_name=dict(type='str', required=True),
+ service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
+ resource_id=dict(type='str', required=True),
+ scalable_dimension=dict(type='str',
+ required=True,
+ choices=['ecs:service:DesiredCount',
+ 'ec2:spot-fleet-request:TargetCapacity',
+ 'elasticmapreduce:instancegroup:InstanceCount',
+ 'appstream:fleet:DesiredCapacity',
+ 'dynamodb:table:ReadCapacityUnits',
+ 'dynamodb:table:WriteCapacityUnits',
+ 'dynamodb:index:ReadCapacityUnits',
+ 'dynamodb:index:WriteCapacityUnits']),
+ policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
+ step_scaling_policy_configuration=dict(type='dict'),
+ target_tracking_scaling_policy_configuration=dict(
+ type='dict',
+ options=dict(
+ CustomizedMetricSpecification=dict(type='dict'),
+ DisableScaleIn=dict(type='bool'),
+ PredefinedMetricSpecification=dict(type='dict'),
+ ScaleInCooldown=dict(type='int'),
+ ScaleOutCooldown=dict(type='int'),
+ TargetValue=dict(type='float'),
+ )
+ ),
+ minimum_tasks=dict(type='int'),
+ maximum_tasks=dict(type='int'),
+ override_task_capacity=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('application-autoscaling')
+
+ # Remove any target_tracking_scaling_policy_configuration suboptions that are None
+ policy_config_options = [
+ 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue'
+ ]
+ if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict):
+ for option in policy_config_options:
+ if module.params['target_tracking_scaling_policy_configuration'][option] is None:
+ module.params['target_tracking_scaling_policy_configuration'].pop(option)
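+    # e.g. a supplied configuration of {'TargetValue': 60.0, 'ScaleInCooldown': None}
+    # is pruned to {'TargetValue': 60.0} before it is compared and sent to the API.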
+
+ if module.params.get("state") == 'present':
+ # A scalable target must be registered prior to creating a scaling policy
+ scalable_target_result = create_scalable_target(connection, module)
+ policy_result = create_scaling_policy(connection, module)
+ # Merge the results of the scalable target creation and policy deletion/creation
+ # There's no risk in overriding values since mutual keys have the same values in our case
+ merged_result = merge_results(scalable_target_result, policy_result)
+ module.exit_json(**merged_result)
+ else:
+ policy_result = delete_scaling_policy(connection, module)
+ module.exit_json(**policy_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_compute_environment.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_compute_environment.py
new file mode 100644
index 00000000..39ff11e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_compute_environment.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_batch_compute_environment
+version_added: 1.0.0
+short_description: Manage AWS Batch Compute Environments
+description:
+ - This module allows the management of AWS Batch Compute Environments.
+ - It is idempotent and supports "Check" mode.
+  - Use module M(community.aws.aws_batch_compute_environment) to manage the compute
+    environment, M(community.aws.aws_batch_job_queue) to manage job queues, and M(community.aws.aws_batch_job_definition) to manage job definitions.
+
+
+author: Jon Meran (@jonmer85)
+options:
+ compute_environment_name:
+ description:
+ - The name for your compute environment.
+ - Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed.
+ required: true
+ type: str
+ type:
+ description:
+ - The type of the compute environment.
+ required: true
+ choices: ["MANAGED", "UNMANAGED"]
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ compute_environment_state:
+ description:
+ - The state of the compute environment.
+ - If the state is C(ENABLED), then the compute environment accepts jobs
+ from a queue and can scale out automatically based on queues.
+ default: "ENABLED"
+ choices: ["ENABLED", "DISABLED"]
+ type: str
+ service_role:
+ description:
+ - The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
+ services on your behalf.
+ required: true
+ type: str
+ compute_resource_type:
+ description:
+ - The type of compute resource.
+ required: true
+ choices: ["EC2", "SPOT"]
+ type: str
+ minv_cpus:
+ description:
+ - The minimum number of EC2 vCPUs that an environment should maintain.
+ required: true
+ type: int
+ maxv_cpus:
+ description:
+ - The maximum number of EC2 vCPUs that an environment can reach.
+ required: true
+ type: int
+ desiredv_cpus:
+ description:
+ - The desired number of EC2 vCPUS in the compute environment.
+ type: int
+ instance_types:
+ description:
+ - The instance types that may be launched.
+ required: true
+ type: list
+ elements: str
+ image_id:
+ description:
+ - The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
+ type: str
+ subnets:
+ description:
+ - The VPC subnets into which the compute resources are launched.
+ required: true
+ type: list
+ elements: str
+ security_group_ids:
+ description:
+ - The EC2 security groups that are associated with instances launched in the compute environment.
+ required: true
+ type: list
+ elements: str
+ ec2_key_pair:
+ description:
+ - The EC2 key pair that is used for instances launched in the compute environment.
+ type: str
+ instance_role:
+ description:
+ - The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
+ required: true
+ type: str
+ tags:
+ description:
+ - Key-value pair tags to be applied to resources that are launched in the compute environment.
+ type: dict
+ bid_percentage:
+ description:
+ - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
+ instance type before instances are launched.
+ - For example, if your bid percentage is 20%, then the Spot price
+ must be below 20% of the current On-Demand price for that EC2 instance.
+ type: int
+ spot_iam_fleet_role:
+ description:
+ - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
+ type: str
+
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: My Batch Compute Environment
+ community.aws.aws_batch_compute_environment:
+ compute_environment_name: computeEnvironmentName
+ state: present
+ region: us-east-1
+ compute_environment_state: ENABLED
+ type: MANAGED
+ compute_resource_type: EC2
+ minv_cpus: 0
+ maxv_cpus: 2
+ desiredv_cpus: 1
+ instance_types:
+ - optimal
+ subnets:
+ - my-subnet1
+ - my-subnet2
+ security_group_ids:
+ - my-sg1
+ - my-sg2
+ instance_role: arn:aws:iam::<account>:instance-profile/<role>
+ tags:
+ tag1: value1
+ tag2: value2
+ service_role: arn:aws:iam::<account>:role/service-role/<role>
+ register: aws_batch_compute_environment_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: aws_batch_compute_environment_action
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ batch_compute_environment_action: none
+ changed: false
+ invocation:
+ module_args:
+ aws_access_key: ~
+ aws_secret_key: ~
+ bid_percentage: ~
+ compute_environment_name: <name>
+ compute_environment_state: ENABLED
+ compute_resource_type: EC2
+ desiredv_cpus: 0
+ ec2_key_pair: ~
+ ec2_url: ~
+ image_id: ~
+ instance_role: "arn:aws:iam::..."
+ instance_types:
+ - optimal
+ maxv_cpus: 8
+ minv_cpus: 0
+ profile: ~
+ region: us-east-1
+ security_group_ids:
+ - "*******"
+ security_token: ~
+ service_role: "arn:aws:iam::...."
+ spot_iam_fleet_role: ~
+ state: present
+ subnets:
+ - "******"
+ tags:
+ Environment: <name>
+ Name: <name>
+ type: MANAGED
+ validate_certs: true
+ response:
+ computeEnvironmentArn: "arn:aws:batch:...."
+ computeEnvironmentName: <name>
+ computeResources:
+ desiredvCpus: 0
+ instanceRole: "arn:aws:iam::..."
+ instanceTypes:
+ - optimal
+ maxvCpus: 8
+ minvCpus: 0
+ securityGroupIds:
+ - "******"
+ subnets:
+ - "*******"
+ tags:
+ Environment: <name>
+ Name: <name>
+ type: EC2
+ ecsClusterArn: "arn:aws:ecs:....."
+ serviceRole: "arn:aws:iam::..."
+ state: ENABLED
+ status: VALID
+ statusReason: "ComputeEnvironment Healthy"
+ type: MANAGED
+ type: dict
+'''
+
+import re
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+ api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+ return snake_dict_to_camel_dict(api_params)
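+# For illustration (hypothetical values, not part of the module): with
+# module.params of {'compute_environment_name': 'my_env', 'type': 'MANAGED',
+# 'image_id': None}, calling
+#     set_api_params(module, ('compute_environment_name', 'type', 'image_id'))
+# drops the None-valued 'image_id' and returns
+#     {'computeEnvironmentName': 'my_env', 'type': 'MANAGED'}
+# matching the parameter names the boto3 Batch client expects.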
+
+
+def validate_params(module):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :return:
+ """
+
+ compute_environment_name = module.params['compute_environment_name']
+
+ # validate compute environment name
+    if not re.search(r'^[\w:]+$', compute_environment_name):
+        module.fail_json(
+            msg="compute_environment_name {0} is invalid. Names must contain only alphanumeric characters, "
+                "underscores, and colons.".format(compute_environment_name)
+        )
+ if not compute_environment_name.startswith('arn:aws:batch:'):
+ if len(compute_environment_name) > 128:
+ module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
+ .format(compute_environment_name))
+
+ return
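+# For illustration (hypothetical names): 'My_Env_1' passes the regex above,
+# 'my env!' fails it, and a 130-character non-ARN name is rejected by the
+# length check.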
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Compute Environment functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_compute_environment(module, client):
+ try:
+ environments = client.describe_compute_environments(
+ computeEnvironments=[module.params['compute_environment_name']]
+ )
+ if len(environments['computeEnvironments']) > 0:
+ return environments['computeEnvironments'][0]
+ else:
+ return None
+ except ClientError:
+ return None
+
+
+def create_compute_environment(module, client):
+ """
+ Adds a Batch compute environment
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = (
+ 'compute_environment_name', 'type', 'service_role')
+ api_params = set_api_params(module, params)
+
+ if module.params['compute_environment_state'] is not None:
+ api_params['state'] = module.params['compute_environment_state']
+
+ compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
+ 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
+ 'spot_iam_fleet_role')
+ compute_resources_params = set_api_params(module, compute_resources_param_list)
+
+ if module.params['compute_resource_type'] is not None:
+ compute_resources_params['type'] = module.params['compute_resource_type']
+
+ api_params['computeResources'] = compute_resources_params
+
+ try:
+ if not module.check_mode:
+ client.create_compute_environment(**api_params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error creating compute environment')
+
+ return changed
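+# A rough sketch of the request this function assembles (all values
+# hypothetical), mirroring the boto3 Batch create_compute_environment call:
+#
+#     client.create_compute_environment(
+#         computeEnvironmentName='my_env',
+#         type='MANAGED',
+#         state='ENABLED',
+#         serviceRole='arn:aws:iam::123456789012:role/AWSBatchServiceRole',
+#         computeResources={
+#             'type': 'EC2',
+#             'minvCpus': 0,
+#             'maxvCpus': 2,
+#             'instanceTypes': ['optimal'],
+#             'subnets': ['subnet-1'],
+#             'securityGroupIds': ['sg-1'],
+#             'instanceRole': 'ecsInstanceRole',
+#         },
+#     )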
+
+
+def remove_compute_environment(module, client):
+ """
+ Remove a Batch compute environment
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = {'computeEnvironment': module.params['compute_environment_name']}
+
+ try:
+ if not module.check_mode:
+ client.delete_compute_environment(**api_params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error removing compute environment')
+ return changed
+
+
+def manage_state(module, client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ compute_environment_state = module.params['compute_environment_state']
+ compute_environment_name = module.params['compute_environment_name']
+ service_role = module.params['service_role']
+ minv_cpus = module.params['minv_cpus']
+ maxv_cpus = module.params['maxv_cpus']
+ desiredv_cpus = module.params['desiredv_cpus']
+ action_taken = 'none'
+ update_env_response = ''
+
+ check_mode = module.check_mode
+
+ # check if the compute environment exists
+ current_compute_environment = get_current_compute_environment(module, client)
+ response = current_compute_environment
+ if current_compute_environment:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ updates = False
+ # Update Batch Compute Environment configuration
+ compute_kwargs = {'computeEnvironment': compute_environment_name}
+
+ # Update configuration if needed
+ compute_resources = {}
+ if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
+ compute_kwargs.update({'state': compute_environment_state})
+ updates = True
+ if service_role and current_compute_environment['serviceRole'] != service_role:
+ compute_kwargs.update({'serviceRole': service_role})
+ updates = True
+ if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
+ compute_resources['minvCpus'] = minv_cpus
+ if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
+ compute_resources['maxvCpus'] = maxv_cpus
+ if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
+ compute_resources['desiredvCpus'] = desiredv_cpus
+ if len(compute_resources) > 0:
+ compute_kwargs['computeResources'] = compute_resources
+ updates = True
+            if updates:
+                try:
+                    if not check_mode:
+                        update_env_response = client.update_compute_environment(**compute_kwargs)
+                        if not update_env_response:
+                            module.fail_json(msg='Unable to get compute environment information after updating')
+                    changed = True
+                    action_taken = "updated"
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Unable to update environment.")
+
+ else:
+ # Create Batch Compute Environment
+ changed = create_compute_environment(module, client)
+ # Describe compute environment
+ action_taken = 'added'
+ response = get_current_compute_environment(module, client)
+ if not response:
+ module.fail_json(msg='Unable to get compute environment information after creating')
+ else:
+ if current_state == 'present':
+ # remove the compute environment
+ changed = remove_compute_environment(module, client)
+ action_taken = 'deleted'
+ return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: changed, batch_compute_environment_action, response
+ """
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ compute_environment_name=dict(required=True),
+ type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
+ compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+ service_role=dict(required=True),
+ compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
+ minv_cpus=dict(type='int', required=True),
+ maxv_cpus=dict(type='int', required=True),
+ desiredv_cpus=dict(type='int'),
+ instance_types=dict(type='list', required=True, elements='str'),
+ image_id=dict(),
+ subnets=dict(type='list', required=True, elements='str'),
+ security_group_ids=dict(type='list', required=True, elements='str'),
+ ec2_key_pair=dict(),
+ instance_role=dict(required=True),
+ tags=dict(type='dict'),
+ bid_percentage=dict(type='int'),
+ spot_iam_fleet_role=dict(),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('batch')
+
+ validate_params(module)
+
+ results = manage_state(module, client)
+
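+    # ignore_list=['Tags'] tells camel_dict_to_snake_dict to leave any 'Tags'
+    # sub-dictionary unconverted, so user-defined tag keys keep their case.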
+ module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_definition.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_definition.py
new file mode 100644
index 00000000..18d0429a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_definition.py
@@ -0,0 +1,456 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_batch_job_definition
+version_added: 1.0.0
+short_description: Manage AWS Batch Job Definitions
+description:
+  - This module allows the management of AWS Batch Job Definitions.
+  - It is idempotent and supports check mode.
+  - Use M(community.aws.aws_batch_compute_environment) to manage compute environments,
+    M(community.aws.aws_batch_job_queue) to manage job queues, and
+    M(community.aws.aws_batch_job_definition) to manage job definitions.
+author: Jon Meran (@jonmer85)
+options:
+ job_definition_arn:
+ description:
+ - The ARN for the job definition.
+ type: str
+ job_definition_name:
+ description:
+ - The name for the job definition.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ type:
+ description:
+ - The type of job definition.
+ required: true
+ type: str
+ parameters:
+ description:
+ - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
+ key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
+ the job definition.
+ type: dict
+ image:
+ description:
+      - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
+        Hub registry are available by default. Other repositories are specified with C(repository-url/image:tag).
+        Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes,
+        and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker
+        Remote API and the IMAGE parameter of docker run.
+ required: true
+ type: str
+ vcpus:
+ description:
+ - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container
+ section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to
+ 1,024 CPU shares.
+ required: true
+ type: int
+ memory:
+ description:
+ - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
+ specified here, the container is killed. This parameter maps to Memory in the Create a container section of the
+ Docker Remote API and the --memory option to docker run.
+ required: true
+ type: int
+ command:
+ description:
+ - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of
+ the Docker Remote API and the COMMAND parameter to docker run. For more information,
+ see U(https://docs.docker.com/engine/reference/builder/#cmd).
+ type: list
+ elements: str
+ job_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
+ type: str
+ volumes:
+ description:
+ - A list of data volumes used in a job.
+ suboptions:
+ host:
+ description:
+          - The contents of the host parameter determine whether your data volume persists on the host container
+            instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
+            path for your data volume, but the data is not guaranteed to persist after the containers associated with
+            it stop running.
+          - This is a dictionary with one property, sourcePath - the path on the host container
+            instance that is presented to the container. If this parameter is empty, then the Docker daemon assigns
+            a host path for you. If the host parameter contains a sourcePath file location, then the data volume
+            persists at the specified location on the host container instance until you delete it manually. If the
+            sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the
+            location does exist, the contents of the source path folder are exported.
+ name:
+ description:
+ - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
+ allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
+ type: list
+ elements: dict
+ environment:
+ description:
+ - The environment variables to pass to a container. This parameter maps to Env in the Create a container section
+ of the Docker Remote API and the --env option to docker run.
+ suboptions:
+ name:
+ description:
+ - The name of the key value pair. For environment variables, this is the name of the environment variable.
+ value:
+ description:
+ - The value of the key value pair. For environment variables, this is the value of the environment variable.
+ type: list
+ elements: dict
+ mount_points:
+ description:
+ - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container
+ section of the Docker Remote API and the --volume option to docker run.
+ suboptions:
+ containerPath:
+ description:
+ - The path on the container at which to mount the host volume.
+ readOnly:
+ description:
+ - If this value is true , the container has read-only access to the volume; otherwise, the container can write
+ to the volume. The default value is C(false).
+ sourceVolume:
+ description:
+ - The name of the volume to mount.
+ type: list
+ elements: dict
+ readonly_root_filesystem:
+ description:
+ - When this parameter is true, the container is given read-only access to its root file system. This parameter
+ maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option
+ to docker run.
+ type: str
+ privileged:
+ description:
+ - When this parameter is true, the container is given elevated privileges on the host container instance
+ (similar to the root user). This parameter maps to Privileged in the Create a container section of the
+ Docker Remote API and the --privileged option to docker run.
+ type: str
+ ulimits:
+ description:
+ - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section
+ of the Docker Remote API and the --ulimit option to docker run.
+ suboptions:
+ hardLimit:
+ description:
+ - The hard limit for the ulimit type.
+ name:
+ description:
+ - The type of the ulimit.
+ softLimit:
+ description:
+ - The soft limit for the ulimit type.
+ type: list
+ elements: dict
+ user:
+ description:
+ - The user name to use inside the container. This parameter maps to User in the Create a container section of
+ the Docker Remote API and the --user option to docker run.
+ type: str
+ attempts:
+ description:
+      - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10
+        attempts. If attempts is greater than one, the job is retried on failure until it has been moved to RUNNABLE
+        that many times.
+ type: int
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    state: present
+  tasks:
+    - name: My Batch Job Definition
+      community.aws.aws_batch_job_definition:
+        job_definition_name: My Batch Job Definition
+        state: present
+        type: container
+        parameters:
+          Param1: Val1
+          Param2: Val2
+        image: <Docker Image URL>
+        vcpus: 1
+        memory: 512
+        command:
+          - python
+          - run_my_script.py
+          - arg1
+        job_role_arn: <Job Role ARN>
+        attempts: 3
+      register: job_definition_create_result
+
+    - name: show results
+      ansible.builtin.debug:
+        var: job_definition_create_result
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ aws_batch_job_definition_action: none
+ changed: false
+ response:
+ job_definition_arn: "arn:aws:batch:...."
+ job_definition_name: <name>
+ status: INACTIVE
+ type: container
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+def validate_params(module, batch_client):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+ return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Job Definition functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_job_definition(module, batch_client):
+ try:
+ environments = batch_client.describe_job_definitions(
+ jobDefinitionName=module.params['job_definition_name']
+ )
+ if len(environments['jobDefinitions']) > 0:
+ latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions']))
+ latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision),
+ None)
+ return latest_definition
+ return None
+ except ClientError:
+ return None
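+# For example (hypothetical data): if describe_job_definitions returns entries
+# with revisions [1, 3, 2], the helper above picks the one with revision 3.
+# Registering a changed definition always creates a new revision, so the
+# highest revision is the current one.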
+
+
+def create_job_definition(module, batch_client):
+ """
+ Adds a Batch job definition
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = set_api_params(module, get_base_params())
+ container_properties_params = set_api_params(module, get_container_property_params())
+ retry_strategy_params = set_api_params(module, get_retry_strategy_params())
+
+ api_params['retryStrategy'] = retry_strategy_params
+ api_params['containerProperties'] = container_properties_params
+
+ try:
+ if not module.check_mode:
+ batch_client.register_job_definition(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error registering job definition')
+
+ return changed
+
+
+def get_retry_strategy_params():
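+    # The trailing comma makes this a one-element tuple, ('attempts',), so
+    # callers can iterate over it like the other parameter lists.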
+ return 'attempts',
+
+
+def get_container_property_params():
+ return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points',
+ 'readonly_root_filesystem', 'privileged', 'ulimits', 'user')
+
+
+def get_base_params():
+ return 'job_definition_name', 'type', 'parameters'
+
+
+def remove_job_definition(module, batch_client):
+ """
+ Remove a Batch job definition
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+
+ changed = False
+
+ try:
+ if not module.check_mode:
+ batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn'])
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error removing job definition')
+ return changed
+
+
+def job_definition_equal(module, current_definition):
+ equal = True
+
+ for param in get_base_params():
+ if module.params.get(param) != current_definition.get(cc(param)):
+ equal = False
+ break
+
+ for param in get_container_property_params():
+ if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)):
+ equal = False
+ break
+
+ for param in get_retry_strategy_params():
+ if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)):
+ equal = False
+ break
+
+ return equal
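+# Sketch of the comparison (hypothetical values): for the param 'job_role_arn',
+# cc('job_role_arn') yields 'jobRoleArn', so the loop compares
+# module.params['job_role_arn'] with
+# current_definition['containerProperties']['jobRoleArn']. Any mismatch marks
+# the definition as changed and a new revision is registered.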
+
+
+def manage_state(module, batch_client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ job_definition_name = module.params['job_definition_name']
+ action_taken = 'none'
+ response = None
+
+ check_mode = module.check_mode
+
+ # check if the job definition exists
+ current_job_definition = get_current_job_definition(module, batch_client)
+ if current_job_definition:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ # check if definition has changed and register a new version if necessary
+ if not job_definition_equal(module, current_job_definition):
+ create_job_definition(module, batch_client)
+ action_taken = 'updated with new version'
+ changed = True
+ else:
+ # Create Job definition
+ changed = create_job_definition(module, batch_client)
+ action_taken = 'added'
+
+ response = get_current_job_definition(module, batch_client)
+ if not response:
+ module.fail_json(msg='Unable to get job definition information after creating/updating')
+ else:
+ if current_state == 'present':
+ # remove the Job definition
+ changed = remove_job_definition(module, batch_client)
+ action_taken = 'deregistered'
+ return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ job_definition_name=dict(required=True),
+ job_definition_arn=dict(),
+ type=dict(required=True),
+ parameters=dict(type='dict'),
+ image=dict(required=True),
+ vcpus=dict(type='int', required=True),
+ memory=dict(type='int', required=True),
+ command=dict(type='list', default=[], elements='str'),
+ job_role_arn=dict(),
+ volumes=dict(type='list', default=[], elements='dict'),
+ environment=dict(type='list', default=[], elements='dict'),
+ mount_points=dict(type='list', default=[], elements='dict'),
+ readonly_root_filesystem=dict(),
+ privileged=dict(),
+ ulimits=dict(type='list', default=[], elements='dict'),
+ user=dict(),
+ attempts=dict(type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ batch_client = module.client('batch')
+
+ validate_params(module, batch_client)
+
+ results = manage_state(module, batch_client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_queue.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_queue.py
new file mode 100644
index 00000000..b472371e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_batch_job_queue.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_batch_job_queue
+version_added: 1.0.0
+short_description: Manage AWS Batch Job Queues
+description:
+  - This module allows the management of AWS Batch Job Queues.
+  - It is idempotent and supports check mode.
+  - Use M(community.aws.aws_batch_compute_environment) to manage compute environments,
+    M(community.aws.aws_batch_job_queue) to manage job queues, and
+    M(community.aws.aws_batch_job_definition) to manage job definitions.
+author: Jon Meran (@jonmer85)
+options:
+ job_queue_name:
+ description:
+      - The name for the job queue.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ job_queue_state:
+ description:
+      - The state of the job queue. If the job queue state is C(ENABLED), it is able to accept jobs.
+ default: "ENABLED"
+ choices: ["ENABLED", "DISABLED"]
+ type: str
+ priority:
+ description:
+      - The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
+        parameter) are evaluated first when associated with the same compute environment. Priority is determined in
+        descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a
+        job queue with a priority value of 1.
+ required: true
+ type: int
+ compute_environment_order:
+ description:
+ - The set of compute environments mapped to a job queue and their order relative to each other. The job
+ scheduler uses this parameter to determine which compute environment should execute a given job. Compute
+ environments must be in the VALID state before you can associate them with a job queue. You can associate up to
+ 3 compute environments with a job queue.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ order:
+ type: int
+ description: The relative priority of the environment.
+ compute_environment:
+ type: str
+ description: The name of the compute environment.
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: My Batch Job Queue
+ community.aws.aws_batch_job_queue:
+ job_queue_name: jobQueueName
+ state: present
+ region: us-east-1
+ job_queue_state: ENABLED
+ priority: 1
+ compute_environment_order:
+ - order: 1
+ compute_environment: my_compute_env1
+ - order: 2
+ compute_environment: my_compute_env2
+ register: batch_job_queue_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: batch_job_queue_action
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ batch_job_queue_action: updated
+ changed: false
+ response:
+ job_queue_arn: "arn:aws:batch:...."
+ job_queue_name: <name>
+ priority: 1
+ state: DISABLED
+ status: UPDATING
+ status_reason: "JobQueue Healthy"
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+def validate_params(module):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ """
+ return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Job Queue functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_job_queue(module, client):
+ try:
+ environments = client.describe_job_queues(
+ jobQueues=[module.params['job_queue_name']]
+ )
+ return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
+ except ClientError:
+ return None
+
+
+def create_job_queue(module, client):
+ """
+ Adds a Batch job queue
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = ('job_queue_name', 'priority')
+ api_params = set_api_params(module, params)
+
+ if module.params['job_queue_state'] is not None:
+ api_params['state'] = module.params['job_queue_state']
+
+ api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
+
+ try:
+ if not module.check_mode:
+ client.create_job_queue(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Error creating job queue')
+
+ return changed
+
+
+def get_compute_environment_order_list(module):
+ compute_environment_order_list = []
+ for ceo in module.params['compute_environment_order']:
+ compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
+ return compute_environment_order_list
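+# For example (hypothetical input): a compute_environment_order of
+#     [{'order': 1, 'compute_environment': 'my_env1'}]
+# becomes
+#     [{'order': 1, 'computeEnvironment': 'my_env1'}]
+# which is the shape expected by create_job_queue and update_job_queue.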
+
+
+def remove_job_queue(module, client):
+ """
+ Remove a Batch job queue
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = {'jobQueue': module.params['job_queue_name']}
+
+ try:
+ if not module.check_mode:
+ client.delete_job_queue(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error removing job queue')
+ return changed
+
+
+def manage_state(module, client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ job_queue_state = module.params['job_queue_state']
+ job_queue_name = module.params['job_queue_name']
+ priority = module.params['priority']
+ action_taken = 'none'
+ response = None
+
+ check_mode = module.check_mode
+
+ # check if the job queue exists
+ current_job_queue = get_current_job_queue(module, client)
+ if current_job_queue:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ updates = False
+ # Update Batch Job Queue configuration
+ job_kwargs = {'jobQueue': job_queue_name}
+
+ # Update configuration if needed
+ if job_queue_state and current_job_queue['state'] != job_queue_state:
+ job_kwargs.update({'state': job_queue_state})
+ updates = True
+ if priority is not None and current_job_queue['priority'] != priority:
+ job_kwargs.update({'priority': priority})
+ updates = True
+
+ new_compute_environment_order_list = get_compute_environment_order_list(module)
+ if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
+ job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
+ updates = True
+
+ if updates:
+ try:
+ if not check_mode:
+ client.update_job_queue(**job_kwargs)
+ changed = True
+ action_taken = "updated"
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update job queue")
+
+ else:
+ # Create Job Queue
+ changed = create_job_queue(module, client)
+ action_taken = 'added'
+
+ # Describe job queue
+ response = get_current_job_queue(module, client)
+ if not response:
+ module.fail_json(msg='Unable to get job queue information after creating/updating')
+ else:
+ if current_state == 'present':
+ # remove the Job Queue
+ changed = remove_job_queue(module, client)
+ action_taken = 'deleted'
+ return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: changed, batch_job_queue_action, response
+ """
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ job_queue_name=dict(required=True),
+ job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+ priority=dict(type='int', required=True),
+ compute_environment_order=dict(type='list', required=True, elements='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('batch')
+
+ validate_params(module)
+
+ results = manage_state(module, client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codebuild.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codebuild.py
new file mode 100644
index 00000000..7c5e7500
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codebuild.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_codebuild
+version_added: 1.0.0
+short_description: Create or delete an AWS CodeBuild project
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+description:
+  - Create or delete CodeBuild projects on AWS, used for building code artifacts from source code.
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+requirements: [ botocore, boto3 ]
+options:
+ name:
+ description:
+ - Name of the CodeBuild project.
+ required: true
+ type: str
+ description:
+ description:
+ - Descriptive text of the CodeBuild project.
+ type: str
+ source:
+ description:
+ - Configure service and location for the build input source.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
+ required: true
+ type: str
+ location:
+ description:
+          - Information about the location of the source code to be built. For type C(CODEPIPELINE), location should not be specified.
+ type: str
+ git_clone_depth:
+ description:
+ - When using git you can specify the clone depth as an integer here.
+ type: int
+ buildspec:
+ description:
+ - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
+ type: str
+ insecure_ssl:
+ description:
+ - Enable this flag to ignore SSL warnings while connecting to the project source code.
+ type: bool
+ type: dict
+ artifacts:
+ description:
+ - Information about the build output artifacts for the build project.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
+ required: true
+ location:
+ description:
+ - Information about the build output artifact location. When choosing type S3, set the bucket name here.
+ path:
+ description:
+ - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
+ - Used for path in S3 bucket when type is C(S3).
+ namespace_type:
+ description:
+ - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
+ - Accepts C(BUILD_ID) and C(NONE).
+ - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
+ name:
+ description:
+ - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
+ packaging:
+ description:
+ - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
+ type: dict
+ cache:
+ description:
+ - Caching params to speed up following builds.
+ suboptions:
+ type:
+ description:
+ - Cache type. Can be C(NO_CACHE) or C(S3).
+ required: true
+ location:
+ description:
+ - Caching location on S3.
+ required: true
+ type: dict
+ environment:
+ description:
+ - Information about the build environment for the build project.
+ suboptions:
+ type:
+ description:
+ - The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
+ required: true
+ image:
+ description:
+ - The ID of the Docker image to use for this build project.
+ required: true
+ compute_type:
+ description:
+ - Information about the compute resources the build project will use.
+ - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
+ required: true
+ environment_variables:
+ description:
+ - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
+ - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
+ privileged_mode:
+ description:
+          - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images.
+ type: dict
+ service_role:
+ description:
+ - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ type: str
+ timeout_in_minutes:
+ description:
+ - How long CodeBuild should wait until timing out any build that has not been marked as completed.
+ default: 60
+ type: int
+ encryption_key:
+ description:
+ - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
+ type: str
+ tags:
+ description:
+ - A set of tags for the build project.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The name of the Tag.
+ type: str
+ value:
+ description: The value of the Tag.
+ type: str
+ vpc_config:
+ description:
+ - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
+ type: dict
+ state:
+ description:
+ - Create or remove code build project.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.aws_codebuild:
+ name: my_project
+ description: My nice little project
+ service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
+ source:
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
+ environment:
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - { name: 'PROFILE', value: 'staging' }
+ encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
+ region: us-east-1
+ state: present
+'''
+
+RETURN = r'''
+project:
+ description: Returns the dictionary describing the code project configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodeBuild project
+ returned: always
+ type: str
+ sample: my_project
+ arn:
+ description: ARN of the CodeBuild project
+ returned: always
+ type: str
+ sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
+ description:
+ description: A description of the build project
+ returned: always
+ type: str
+ sample: My nice little project
+ source:
+ description: Information about the build input source code.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the repository
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Location identifier, depending on the source type.
+ returned: when configured
+ type: str
+ git_clone_depth:
+ description: The git clone depth
+ returned: when configured
+ type: int
+ build_spec:
+ description: The build spec declaration to use for the builds in this build project.
+ returned: always
+ type: str
+ auth:
+ description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
+ returned: when configured
+ type: complex
+ insecure_ssl:
+ description: True if set to ignore SSL warnings.
+ returned: when configured
+ type: bool
+ artifacts:
+ description: Information about the output of build artifacts
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of build artifact.
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Output location for build artifacts
+ returned: when configured
+ type: str
+ # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
+ cache:
+ description: Cache settings for the build project.
+ returned: when configured
+ type: dict
+ environment:
+ description: Environment settings for the build
+ returned: always
+ type: dict
+ service_role:
+ description: IAM role to be used during build to access other AWS services.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codebuild-service-role
+ timeout_in_minutes:
+ description: The timeout of a build in minutes
+ returned: always
+ type: int
+ sample: 60
+ tags:
+ description: Tags added to the project
+ returned: when configured
+ type: list
+ created:
+ description: Timestamp of the create time of the project
+ returned: always
+ type: str
+ sample: "2018-04-17T16:56:03.245000+02:00"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_or_update_project(client, params, module):
+ resp = {}
+ name = params['name']
+ # clean up params
+ formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+ permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
+ permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
+
+ formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
+ formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
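+    # A sketch of the filtering above (hypothetical values): if create_project
+    # accepted 'name', 'source' and 'artifacts' while update_project accepted
+    # only 'name' and 'source', then a formatted_params of
+    #     {'name': 'p', 'source': {...}, 'artifacts': {...}}
+    # would yield formatted_create_params with all three keys and
+    # formatted_update_params without 'artifacts'. Introspecting the client this
+    # way keeps the module aligned with whatever the installed botocore supports.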
+
+ # Check if project with that name already exists and if so update existing:
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+
+ if 'name' in found:
+ found_project = found
+ resp = update_project(client=client, params=formatted_update_params, module=module)
+ updated_project = resp['project']
+
+ # Prep both dicts for sensible change comparison:
+ found_project.pop('lastModified')
+ updated_project.pop('lastModified')
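+        # The update_project response appears to omit 'tags' when none are set,
+        # so default it here to avoid reporting a spurious change below.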
+ if 'tags' not in updated_project:
+ updated_project['tags'] = []
+
+ if updated_project != found_project:
+ changed = True
+ return resp, changed
+ # Or create new project:
+ try:
+ resp = client.create_project(**formatted_create_params)
+ changed = True
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+
+
+def update_project(client, params, module):
+ name = params['name']
+
+ try:
+ resp = client.update_project(**params)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+
+
+def delete_project(client, name, module):
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+ if 'name' in found:
+ # Mark as changed when a project with that name existed before calling delete
+ changed = True
+ try:
+ resp = client.delete_project(name=name)
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+
+
+def describe_project(client, name, module):
+ project = {}
+ try:
+ projects = client.batch_get_projects(names=[name])['projects']
+ if len(projects) > 0:
+ project = projects[0]
+ return project
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ source=dict(required=True, type='dict'),
+ artifacts=dict(required=True, type='dict'),
+ cache=dict(type='dict'),
+ environment=dict(type='dict'),
+ service_role=dict(),
+ timeout_in_minutes=dict(type='int', default=60),
+ encryption_key=dict(),
+ tags=dict(type='list', elements='dict'),
+ vpc_config=dict(type='dict'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codebuild')
+
+ state = module.params.get('state')
+ changed = False
+
+ if state == 'present':
+ project_result, changed = create_or_update_project(
+ client=client_conn,
+ params=module.params,
+ module=module)
+ elif state == 'absent':
+ project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codecommit.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codecommit.py
new file mode 100644
index 00000000..18fc10a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codecommit.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_codecommit
+version_added: 1.0.0
+short_description: Manage repositories in AWS CodeCommit
+description:
+ - Supports creation and deletion of CodeCommit repositories.
+ - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
+author: Shuang Wang (@ptux)
+
+requirements:
+ - botocore
+ - boto3
+ - python >= 2.6
+
+options:
+ name:
+ description:
+      - Name of the repository.
+ required: true
+ type: str
+ description:
+ description:
+      - Description or comment of the repository.
+ required: false
+ aliases:
+ - comment
+ type: str
+ state:
+ description:
+ - Specifies the state of repository.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+RETURN = '''
+repository_metadata:
+ description: "Information about the repository."
+ returned: always
+ type: complex
+ contains:
+ account_id:
+ description: "The ID of the AWS account associated with the repository."
+ returned: when state is present
+ type: str
+ sample: "268342293637"
+ arn:
+ description: "The Amazon Resource Name (ARN) of the repository."
+ returned: when state is present
+ type: str
+ sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
+ clone_url_http:
+ description: "The URL to use for cloning the repository over HTTPS."
+ returned: when state is present
+ type: str
+ sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
+ clone_url_ssh:
+ description: "The URL to use for cloning the repository over SSH."
+ returned: when state is present
+ type: str
+ sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
+ creation_date:
+ description: "The date and time the repository was created, in timestamp format."
+ returned: when state is present
+ type: str
+ sample: "2018-10-16T13:21:41.261000+09:00"
+ last_modified_date:
+ description: "The date and time the repository was last modified, in timestamp format."
+ returned: when state is present
+ type: str
+ sample: "2018-10-16T13:21:41.261000+09:00"
+ repository_description:
+ description: "A comment or description about the repository."
+ returned: when state is present
+ type: str
+ sample: "test from ptux"
+ repository_id:
+ description: "The ID of the repository that was created or deleted"
+ returned: always
+ type: str
+ sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
+ repository_name:
+ description: "The repository's name."
+ returned: when state is present
+ type: str
+ sample: "reponame"
+
+response_metadata:
+ description: "Information about the response."
+ returned: always
+ type: complex
+ contains:
+    http_headers:
+      description: "HTTP headers of the HTTP response."
+      returned: always
+      type: dict
+    http_status_code:
+      description: "HTTP status code of the HTTP response."
+      returned: always
+      type: str
+      sample: "200"
+    request_id:
+      description: "HTTP request ID."
+      returned: always
+      type: str
+      sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
+    retry_attempts:
+      description: "Number of retry attempts."
+      returned: always
+      type: str
+      sample: "0"
+'''
+
+EXAMPLES = '''
+# Create a new repository
+- community.aws.aws_codecommit:
+ name: repo
+ state: present
+
+# Delete a repository
+- community.aws.aws_codecommit:
+ name: repo
+ state: absent
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class CodeCommit(object):
+ def __init__(self, module=None):
+ self._module = module
+ self._client = self._module.client('codecommit')
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ result = dict(changed=False)
+
+ if self._module.params['state'] == 'present':
+ if not self._repository_exists():
+ if not self._check_mode:
+ result = self._create_repository()
+ result['changed'] = True
+ else:
+ metadata = self._get_repository()['repositoryMetadata']
+ if not metadata.get('repositoryDescription'):
+ metadata['repositoryDescription'] = ''
+ if metadata['repositoryDescription'] != self._module.params['description']:
+ if not self._check_mode:
+ self._update_repository()
+ result['changed'] = True
+ result.update(self._get_repository())
+ if self._module.params['state'] == 'absent' and self._repository_exists():
+ if not self._check_mode:
+ result = self._delete_repository()
+ result['changed'] = True
+ return result
+
+ def _repository_exists(self):
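+        # list_repositories pages contain entries of the form
+        # {'repositoryName': ..., 'repositoryId': ...}; the `in item.values()`
+        # test below therefore matches the requested name against either field.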
+ try:
+ paginator = self._client.get_paginator('list_repositories')
+ for page in paginator.paginate():
+ repositories = page['repositories']
+ for item in repositories:
+ if self._module.params['name'] in item.values():
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't get repository")
+ return False
+
+ def _get_repository(self):
+ try:
+ result = self._client.get_repository(
+ repositoryName=self._module.params['name']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't get repository")
+ return result
+
+ def _update_repository(self):
+ try:
+ result = self._client.update_repository_description(
+ repositoryName=self._module.params['name'],
+ repositoryDescription=self._module.params['description']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't create repository")
+ return result
+
+ def _create_repository(self):
+ try:
+ result = self._client.create_repository(
+ repositoryName=self._module.params['name'],
+ repositoryDescription=self._module.params['description']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't create repository")
+ return result
+
+ def _delete_repository(self):
+ try:
+ result = self._client.delete_repository(
+ repositoryName=self._module.params['name']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't delete repository")
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], required=True),
+ description=dict(default='', aliases=['comment'])
+ )
+
+ ansible_aws_module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ aws_codecommit = CodeCommit(module=ansible_aws_module)
+ result = aws_codecommit.process()
+ ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codepipeline.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codepipeline.py
new file mode 100644
index 00000000..8b44dc76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_codepipeline.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_codepipeline
+version_added: 1.0.0
+short_description: Create or delete AWS CodePipelines
+notes:
+  - For details of the parameters and returns, see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html).
+description:
+ - Create or delete a CodePipeline on AWS.
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+requirements: [ botocore, boto3 ]
+options:
+ name:
+ description:
+ - Name of the pipeline
+ required: true
+ type: str
+ role_arn:
+ description:
+ - ARN of the IAM role to use when executing the pipeline
+ required: true
+ type: str
+ artifact_store:
+ description:
+ - Location information where artifacts are stored (on S3). Dictionary with fields type and location.
+ required: true
+ suboptions:
+ type:
+ description:
+ - Type of the artifacts storage (only 'S3' is currently supported).
+ type: str
+ location:
+ description:
+ - Bucket name for artifacts.
+ type: str
+ type: dict
+ stages:
+ description:
+ - List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
+ required: true
+ suboptions:
+ name:
+ description:
+ - Name of the stage (step) in the codepipeline
+ type: str
+ actions:
+ description:
+ - List of action configurations for that stage.
+ - 'See the boto3 documentation for full documentation of suboptions:'
+ - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)'
+ type: list
+ elements: dict
+ elements: dict
+ type: list
+ version:
+ description:
+ - Version number of the pipeline. This number is automatically incremented when a pipeline is updated.
+ required: false
+ type: int
+ state:
+ description:
+ - Create or remove code pipeline
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
+- community.aws.aws_codepipeline:
+ name: my_deploy_pipeline
+ role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
+ artifact_store:
+ type: S3
+ location: my_s3_codepipline_bucket
+ stages:
+ - name: Get_source
+ actions:
+ -
+ name: Git_pull
+ actionTypeId:
+ category: Source
+ owner: ThirdParty
+ provider: GitHub
+ version: '1'
+ outputArtifacts:
+ - { name: my-app-source }
+ configuration:
+ Owner: mediapeers
+ Repo: my_gh_repo
+ PollForSourceChanges: 'true'
+ Branch: master
+ # Generate token like this:
+ # https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
+ # GH Link: https://github.com/settings/tokens
+ OAuthToken: 'abc123def456'
+ runOrder: 1
+ - name: Build
+ actions:
+ -
+ name: CodeBuild
+ actionTypeId:
+ category: Build
+ owner: AWS
+ provider: CodeBuild
+ version: '1'
+ inputArtifacts:
+ - { name: my-app-source }
+ outputArtifacts:
+ - { name: my-app-build }
+ configuration:
+ # A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
+ ProjectName: codebuild-project-name
+ runOrder: 1
+ - name: ECS_deploy
+ actions:
+ -
+ name: ECS_deploy
+ actionTypeId:
+ category: Deploy
+ owner: AWS
+ provider: ECS
+ version: '1'
+ inputArtifacts:
+            - { name: my-app-build }
+ configuration:
+ # an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
+ ClusterName: ecs-cluster-name
+ ServiceName: ecs-cluster-service-name
+ FileName: imagedefinitions.json
+ region: us-east-1
+ state: present
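+
+# A minimal removal sketch (values are illustrative). The module's argument
+# spec still requires role_arn, artifact_store and stages even when deleting,
+# so placeholders are passed alongside state=absent.
+- community.aws.aws_codepipeline:
+    name: my_deploy_pipeline
+    role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
+    artifact_store:
+      type: S3
+      location: my_s3_codepipline_bucket
+    stages: []
+    state: absent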
+'''
+
+RETURN = r'''
+pipeline:
+ description: Returns the dictionary describing the code pipeline configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodePipeline
+ returned: always
+ type: str
+ sample: my_deploy_pipeline
+ role_arn:
+ description: ARN of the IAM role attached to the code pipeline
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codepipeline-service-role
+ artifact_store:
+ description: Information about where the build artifacts are stored
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the artifacts store, such as S3
+ returned: always
+ type: str
+ sample: S3
+ location:
+ description: The location of the artifacts storage (s3 bucket name)
+ returned: always
+ type: str
+ sample: my_s3_codepipline_bucket
+ encryption_key:
+ description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
+ returned: when configured
+ type: str
+ stages:
+ description: List of stages configured for this pipeline
+ returned: always
+ type: list
+ version:
+ description: The version number of the pipeline. This number is auto incremented when pipeline params are changed.
+ returned: always
+ type: int
+'''
+
+import copy
+import traceback
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
+ pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
+ if version:
+ pipeline_dict['version'] = version
+ try:
+ resp = client.create_pipeline(pipeline=pipeline_dict)
+ return resp
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable create pipeline {0}: {1}".format(name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def update_pipeline(client, pipeline_dict, module):
+ try:
+ resp = client.update_pipeline(pipeline=pipeline_dict)
+ return resp
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
+ exception=traceback.format_exc())
+
+
+def delete_pipeline(client, name, module):
+ try:
+ resp = client.delete_pipeline(name=name)
+ return resp
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable delete pipeline {0}: {1}".format(name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def describe_pipeline(client, name, version, module):
+ pipeline = {}
+ try:
+ if version is not None:
+ pipeline = client.get_pipeline(name=name, version=version)
+ return pipeline
+ else:
+ pipeline = client.get_pipeline(name=name)
+ return pipeline
+ except is_boto3_error_code('PipelineNotFoundException'):
+ return pipeline
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ role_arn=dict(required=True, type='str'),
+ artifact_store=dict(required=True, type='dict'),
+ stages=dict(required=True, type='list', elements='dict'),
+ version=dict(type='int'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codepipeline')
+
+ state = module.params.get('state')
+ changed = False
+
+ # Determine if the CodePipeline exists
+ found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
+ pipeline_result = {}
+
+ if state == 'present':
+ if 'pipeline' in found_code_pipeline:
+ pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
+ # Update dictionary with provided module params:
+ pipeline_dict['roleArn'] = module.params['role_arn']
+ pipeline_dict['artifactStore'] = module.params['artifact_store']
+ pipeline_dict['stages'] = module.params['stages']
+ if module.params['version'] is not None:
+ pipeline_dict['version'] = module.params['version']
+
+ pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
+
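+            # compare_policies() deep-compares the two dicts and returns True when
+            # they differ, so changed is only reported if the update altered the pipeline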
+ if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
+ changed = True
+ else:
+ pipeline_result = create_pipeline(
+ client=client_conn,
+ name=module.params['name'],
+ role_arn=module.params['role_arn'],
+ artifact_store=module.params['artifact_store'],
+ stages=module.params['stages'],
+ version=module.params['version'],
+ module=module)
+ changed = True
+ elif state == 'absent':
+ if found_code_pipeline:
+ pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
+ changed = True
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregation_authorization.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregation_authorization.py
new file mode 100644
index 00000000..5a4ee38b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregation_authorization.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_aggregation_authorization
+version_added: 1.0.0
+short_description: Manage cross-account AWS Config authorizations
+description:
+  - Module manages cross-account AWS Config aggregation authorizations.
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ authorized_account_id:
+ description:
+ - The 12-digit account ID of the account authorized to aggregate data.
+ type: str
+ required: true
+ authorized_aws_region:
+ description:
+ - The region authorized to collect aggregated data.
+ type: str
+ required: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get current account ID
+ community.aws.aws_caller_info:
+ register: whoami
+- community.aws.aws_config_aggregation_authorization:
+ state: present
+ authorized_account_id: '{{ whoami.account }}'
+ authorized_aws_region: us-east-1
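+
+# A removal sketch for the same authorization:
+- community.aws.aws_config_aggregation_authorization:
+    state: absent
+    authorized_account_id: '{{ whoami.account }}'
+    authorized_aws_region: us-east-1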
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def resource_exists(client, module, params):
+ try:
+ current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
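+        # next() returns the first authorization whose AuthorizedAccountId matches, or None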
+ authorization_exists = next(
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+ None
+ )
+        return authorization_exists is not None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ return False
+
+
+def create_resource(client, module, params, result):
+ try:
+ response = client.put_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def update_resource(client, module, params, result):
+ current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+ current_params = next(
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+ None
+ )
+
+ del current_params['AggregationAuthorizationArn']
+ del current_params['CreationTime']
+
+ if params != current_params:
+ try:
+ response = client.put_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'authorized_account_id': dict(type='str', required=True),
+ 'authorized_aws_region': dict(type='str', required=True),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+
+ params = {
+ 'AuthorizedAccountId': module.params.get('authorized_account_id'),
+ 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
+ }
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ resource_status = resource_exists(client, module, params)
+
+ if module.params.get('state') == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if module.params.get('state') == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregator.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregator.py
new file mode 100644
index 00000000..250f004a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_aggregator.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_config_aggregator
+version_added: 1.0.0
+short_description: Manage AWS Config aggregations across multiple accounts
+description:
+  - Module manages AWS Config aggregators.
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ account_sources:
+ description:
+ - Provides a list of source accounts and regions to be aggregated.
+ suboptions:
+ account_ids:
+ description:
+ - A list of 12-digit account IDs of accounts being aggregated.
+ type: list
+ elements: str
+ aws_regions:
+ description:
+ - A list of source regions being aggregated.
+ type: list
+ elements: str
+ all_aws_regions:
+ description:
+ - If true, aggregate existing AWS Config regions and future regions.
+ type: bool
+ type: list
+ elements: dict
+ required: true
+ organization_source:
+ description:
+      - The organization source of aggregated data, including the IAM role used to retrieve details from AWS Organizations.
+ suboptions:
+ role_arn:
+ description:
+ - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
+ type: str
+ aws_regions:
+ description:
+ - The source regions being aggregated.
+ type: list
+ elements: str
+ all_aws_regions:
+ description:
+ - If true, aggregate existing AWS Config regions and future regions.
+ type: bool
+ type: dict
+ required: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Create cross-account aggregator
+ community.aws.aws_config_aggregator:
+    name: test_config_aggregator
+    state: present
+    account_sources:
+      - account_ids:
+          - '123456789012'
+          - '012345678901'
+          - '901234567890'
+        all_aws_regions: yes
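+
+# A sketch of an organization-wide aggregator (the role ARN is illustrative).
+# The argument spec also requires account_sources; an empty list satisfies it
+# and, being falsy, is not forwarded to the API.
+- name: Create organization aggregator
+  community.aws.aws_config_aggregator:
+    name: test_org_aggregator
+    state: present
+    account_sources: []
+    organization_source:
+      role_arn: 'arn:aws:iam::123456789012:role/service-role/test-config-aggregator-role'
+      all_aws_regions: yes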
+'''
+
+RETURN = r'''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+
+def resource_exists(client, module, params):
+ try:
+ aggregator = client.describe_configuration_aggregators(
+            ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+ )
+ return aggregator['ConfigurationAggregators'][0]
+ except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ client.put_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+ AccountAggregationSources=params['AccountAggregationSources'],
+ OrganizationAggregationSource=params['OrganizationAggregationSource']
+ )
+ result['changed'] = True
+ result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def update_resource(client, module, params, result):
+    current_params = client.describe_configuration_aggregators(
+        ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+    )['ConfigurationAggregators'][0]
+
+    del current_params['ConfigurationAggregatorArn']
+    del current_params['CreationTime']
+    del current_params['LastUpdatedTime']
+
+    if params != current_params:
+ try:
+ client.put_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+ AccountAggregationSources=params['AccountAggregationSources'],
+ OrganizationAggregationSource=params['OrganizationAggregationSource']
+ )
+ result['changed'] = True
+ result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ client.delete_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'account_sources': dict(type='list', required=True, elements='dict'),
+ 'organization_source': dict(type='dict', required=True)
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['ConfigurationAggregatorName'] = name
+ if module.params.get('account_sources'):
+ params['AccountAggregationSources'] = []
+ for i in module.params.get('account_sources'):
+ tmp_dict = {}
+ if i.get('account_ids'):
+ tmp_dict['AccountIds'] = i.get('account_ids')
+ if i.get('aws_regions'):
+ tmp_dict['AwsRegions'] = i.get('aws_regions')
+ if i.get('all_aws_regions') is not None:
+ tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
+ params['AccountAggregationSources'].append(tmp_dict)
+ if module.params.get('organization_source'):
+ params['OrganizationAggregationSource'] = {}
+ if module.params.get('organization_source').get('role_arn'):
+ params['OrganizationAggregationSource'].update({
+ 'RoleArn': module.params.get('organization_source').get('role_arn')
+ })
+ if module.params.get('organization_source').get('aws_regions'):
+ params['OrganizationAggregationSource'].update({
+ 'AwsRegions': module.params.get('organization_source').get('aws_regions')
+ })
+ if module.params.get('organization_source').get('all_aws_regions') is not None:
+            params['OrganizationAggregationSource'].update({
+ 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
+ })
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_delivery_channel.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_delivery_channel.py
new file mode 100644
index 00000000..6e7fe5b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_delivery_channel.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_delivery_channel
+version_added: 1.0.0
+short_description: Manage AWS Config delivery channels
+description:
+ - This module manages AWS Config delivery locations for rule checks and configuration info.
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ s3_bucket:
+ description:
+ - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
+ type: str
+ required: true
+ s3_prefix:
+ description:
+ - The prefix for the specified Amazon S3 bucket.
+ type: str
+ sns_topic_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
+ type: str
+ delivery_frequency:
+ description:
+ - The frequency with which AWS Config delivers configuration snapshots.
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create Delivery Channel for AWS Config
+ community.aws.aws_config_delivery_channel:
+ name: test_delivery_channel
+ state: present
+ s3_bucket: 'test_aws_config_bucket'
+ sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
+ delivery_frequency: 'Twelve_Hours'
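+
+# A removal sketch; s3_bucket is still required by the argument spec:
+- name: Remove Delivery Channel
+  community.aws.aws_config_delivery_channel:
+    name: test_delivery_channel
+    state: absent
+    s3_bucket: 'test_aws_config_bucket'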
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+# this waits for an IAM role to become fully available, at the cost of
+# taking a long time to fail when the IAM role/policy really is invalid
+retry_unavailable_iam_on_put_delivery = AWSRetry.backoff(
+ catch_extra_error_codes=['InsufficientDeliveryPolicyException'],
+)
+
+
+def resource_exists(client, module, params):
+ try:
+ channel = client.describe_delivery_channels(
+ DeliveryChannelNames=[params['name']],
+ aws_retry=True,
+ )
+ return channel['DeliveryChannels'][0]
+ except is_boto3_error_code('NoSuchDeliveryChannelException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ retry_unavailable_iam_on_put_delivery(
+ client.put_delivery_channel,
+ )(
+ DeliveryChannel=params,
+ )
+ result['changed'] = True
+ result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
+ module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
+ except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
+ "Make sure the bucket exists and is available")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_delivery_channels(
+ DeliveryChannelNames=[params['name']],
+ aws_retry=True,
+ )
+
+ if params != current_params['DeliveryChannels'][0]:
+ try:
+ retry_unavailable_iam_on_put_delivery(
+ client.put_delivery_channel,
+ )(
+ DeliveryChannel=params,
+ )
+ result['changed'] = True
+ result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
+ module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
+ except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
+ "Make sure the bucket exists and is available")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_delivery_channel(
+ DeliveryChannelName=params['name']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 's3_bucket': dict(type='str', required=True),
+ 's3_prefix': dict(type='str'),
+ 'sns_topic_arn': dict(type='str'),
+ 'delivery_frequency': dict(
+ type='str',
+ choices=[
+ 'One_Hour',
+ 'Three_Hours',
+ 'Six_Hours',
+ 'Twelve_Hours',
+ 'TwentyFour_Hours'
+ ]
+ ),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['name'] = name
+ if module.params.get('s3_bucket'):
+ params['s3BucketName'] = module.params.get('s3_bucket')
+ if module.params.get('s3_prefix'):
+ params['s3KeyPrefix'] = module.params.get('s3_prefix')
+ if module.params.get('sns_topic_arn'):
+ params['snsTopicARN'] = module.params.get('sns_topic_arn')
+ if module.params.get('delivery_frequency'):
+ params['configSnapshotDeliveryProperties'] = {
+ 'deliveryFrequency': module.params.get('delivery_frequency')
+ }
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ if resource_status:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_recorder.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_recorder.py
new file mode 100644
index 00000000..2d3bf003
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_recorder.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_recorder
+version_added: 1.0.0
+short_description: Manage AWS Config Recorders
+description:
+ - Module manages AWS Config configuration recorder settings.
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ role_arn:
+ description:
+ - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
+ - Required when I(state=present).
+ type: str
+ recording_group:
+ description:
+ - Specifies the types of AWS resources for which AWS Config records configuration changes.
+      - Required when I(state=present).
+ suboptions:
+ all_supported:
+ description:
+ - Specifies whether AWS Config records configuration changes for every supported type of regional resource.
+ - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts
+ recording resources of that type automatically.
+ - If I(all_supported=true), you cannot enumerate a list of I(resource_types).
+ include_global_types:
+ description:
+ - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
+ with the resources that it records.
+ - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
+ you should consider customizing AWS Config in only one region to record global resources.
+ - If you set I(include_global_types=true), you must also set I(all_supported=true).
+ - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording
+ resources of that type automatically.
+ resource_types:
+ description:
+ - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
+ C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)).
+ - Before you can set this option, you must set I(all_supported=false).
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create Configuration Recorder for AWS Config
+ community.aws.aws_config_recorder:
+ name: test_configuration_recorder
+ state: present
+ role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
+ recording_group:
+ all_supported: true
+ include_global_types: true
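+
+# A sketch recording only selected resource types (the type list is illustrative);
+# per the option docs, all_supported must be false to enumerate resource_types.
+- name: Record only selected resource types
+  community.aws.aws_config_recorder:
+    name: test_configuration_recorder
+    state: present
+    role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
+    recording_group:
+      all_supported: false
+      resource_types:
+        - 'AWS::S3::Bucket'
+        - 'AWS::EC2::Instance'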
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+def resource_exists(client, module, params):
+ try:
+ recorder = client.describe_configuration_recorders(
+ ConfigurationRecorderNames=[params['name']]
+ )
+ return recorder['ConfigurationRecorders'][0]
+ except is_boto3_error_code('NoSuchConfigurationRecorderException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ response = client.put_configuration_recorder(
+ ConfigurationRecorder=params
+ )
+ result['changed'] = True
+ result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_configuration_recorders(
+ ConfigurationRecorderNames=[params['name']]
+ )
+
+ if params != current_params['ConfigurationRecorders'][0]:
+ try:
+ response = client.put_configuration_recorder(
+ ConfigurationRecorder=params
+ )
+ result['changed'] = True
+ result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_configuration_recorder(
+ ConfigurationRecorderName=params['name']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
+
+
+def main():
+
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'role_arn': dict(type='str'),
+ 'recording_group': dict(type='dict'),
+ },
+ supports_check_mode=False,
+ required_if=[
+ ('state', 'present', ['role_arn', 'recording_group']),
+ ],
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['name'] = name
+ if module.params.get('role_arn'):
+ params['roleARN'] = module.params.get('role_arn')
+ if module.params.get('recording_group'):
+ params['recordingGroup'] = {}
+ if module.params.get('recording_group').get('all_supported') is not None:
+ params['recordingGroup'].update({
+ 'allSupported': module.params.get('recording_group').get('all_supported')
+ })
+ if module.params.get('recording_group').get('include_global_types') is not None:
+ params['recordingGroup'].update({
+ 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
+ })
+ if module.params.get('recording_group').get('resource_types'):
+ params['recordingGroup'].update({
+ 'resourceTypes': module.params.get('recording_group').get('resource_types')
+ })
+ else:
+ params['recordingGroup'].update({
+ 'resourceTypes': []
+ })
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ if resource_status:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_rule.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_rule.py
new file mode 100644
index 00000000..80550586
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_config_rule.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_rule
+version_added: 1.0.0
+short_description: Manage AWS Config resources
+description:
+  - Module manages AWS Config rules.
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ description:
+ description:
+ - The description that you provide for the AWS Config rule.
+ type: str
+ scope:
+ description:
+ - Defines which resources can trigger an evaluation for the rule.
+ suboptions:
+ compliance_types:
+ description:
+ - The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
+ You can only specify one type if you also specify a resource ID for I(compliance_id).
+ compliance_id:
+ description:
+ - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
+ you must specify one resource type for I(compliance_types).
+ tag_key:
+ description:
+ - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
+ tag_value:
+ description:
+ - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
+ If you specify a value for I(tag_value), you must also specify a value for I(tag_key).
+ type: dict
+ source:
+ description:
+ - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
+ evaluate your AWS resources.
+ suboptions:
+      owner:
+        description:
+          - Indicates whether AWS or the customer owns and manages the AWS Config rule.
+            Use C(AWS) for managed rules and C(CUSTOM_LAMBDA) for custom rules.
+      identifier:
+        description:
+          - The identifier of the rule. For AWS managed rules this is the predefined identifier
+            (for example C(S3_BUCKET_PUBLIC_WRITE_PROHIBITED)); for custom rules it is the ARN of the rule's Lambda function.
+ details:
+ description:
+ - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
+ - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs.
+          - Key C(EventSource) - The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
+          - Key C(MessageType) - The type of notification that triggers AWS Config to run an evaluation for a rule.
+          - Key C(MaximumExecutionFrequency) - The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
+ type: dict
+ required: true
+ input_parameters:
+ description:
+ - A string, in JSON format, that is passed to the AWS Config rule Lambda function.
+ type: str
+ execution_frequency:
+ description:
+ - The maximum frequency with which AWS Config runs evaluations for a rule.
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create Config Rule for AWS Config
+ community.aws.aws_config_rule:
+ name: test_config_rule
+ state: present
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
+
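+# A sketch of a custom rule backed by a Lambda function (the ARN and names are
+# illustrative); the SourceDetails entry selects change-triggered evaluation:
+- name: Create custom Config Rule
+  community.aws.aws_config_rule:
+    name: test_custom_config_rule
+    state: present
+    source:
+      owner: CUSTOM_LAMBDA
+      identifier: 'arn:aws:lambda:us-east-1:123456789012:function:test_config_rule_lambda'
+      details:
+        - EventSource: aws.config
+          MessageType: ConfigurationItemChangeNotification
+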
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+
+def rule_exists(client, module, params):
+ try:
+ rule = client.describe_config_rules(
+ ConfigRuleNames=[params['ConfigRuleName']],
+ aws_retry=True,
+ )
+ return rule['ConfigRules'][0]
+ except is_boto3_error_code('NoSuchConfigRuleException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ client.put_config_rule(
+ ConfigRule=params
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_config_rules(
+ ConfigRuleNames=[params['ConfigRuleName']],
+ aws_retry=True,
+ )
+
+ del current_params['ConfigRules'][0]['ConfigRuleArn']
+ del current_params['ConfigRules'][0]['ConfigRuleId']
+
+ if params != current_params['ConfigRules'][0]:
+ try:
+ client.put_config_rule(
+ ConfigRule=params
+ )
+ result['changed'] = True
+ result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_config_rule(
+ ConfigRuleName=params['ConfigRuleName'],
+ aws_retry=True,
+ )
+ result['changed'] = True
+ result['rule'] = {}
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config rule")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'description': dict(type='str'),
+ 'scope': dict(type='dict'),
+ 'source': dict(type='dict', required=True),
+ 'input_parameters': dict(type='str'),
+ 'execution_frequency': dict(
+ type='str',
+ choices=[
+ 'One_Hour',
+ 'Three_Hours',
+ 'Six_Hours',
+ 'Twelve_Hours',
+ 'TwentyFour_Hours'
+ ]
+ ),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['ConfigRuleName'] = name
+ if module.params.get('description'):
+ params['Description'] = module.params.get('description')
+ if module.params.get('scope'):
+ params['Scope'] = {}
+ if module.params.get('scope').get('compliance_types'):
+ params['Scope'].update({
+ 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
+ })
+ if module.params.get('scope').get('tag_key'):
+ params['Scope'].update({
+ 'TagKey': module.params.get('scope').get('tag_key')
+ })
+ if module.params.get('scope').get('tag_value'):
+ params['Scope'].update({
+ 'TagValue': module.params.get('scope').get('tag_value')
+ })
+ if module.params.get('scope').get('compliance_id'):
+ params['Scope'].update({
+ 'ComplianceResourceId': module.params.get('scope').get('compliance_id')
+ })
+ if module.params.get('source'):
+ params['Source'] = {}
+ if module.params.get('source').get('owner'):
+ params['Source'].update({
+ 'Owner': module.params.get('source').get('owner')
+ })
+ if module.params.get('source').get('identifier'):
+ params['Source'].update({
+ 'SourceIdentifier': module.params.get('source').get('identifier')
+ })
+ if module.params.get('source').get('details'):
+ params['Source'].update({
+ 'SourceDetails': module.params.get('source').get('details')
+ })
+ if module.params.get('input_parameters'):
+ params['InputParameters'] = module.params.get('input_parameters')
+ if module.params.get('execution_frequency'):
+ params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
+ params['ConfigRuleState'] = 'ACTIVE'
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ existing_rule = rule_exists(client, module, params)
+
+ if state == 'present':
+ if not existing_rule:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if existing_rule:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_confirm_connection.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_confirm_connection.py
new file mode 100644
index 00000000..948aa63c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_confirm_connection.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: aws_direct_connect_confirm_connection
+short_description: Confirms the creation of a hosted DirectConnect connection
+description:
+  - Confirms the creation of a hosted DirectConnect connection, which requires approval before it can be used.
+  - DirectConnect connections that require approval will be in the C(ordering) state.
+  - After confirmation, they will move to the C(pending) state and finally to the C(available) state.
+author: "Matt Traynham (@mtraynham)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+options:
+ name:
+ description:
+ - The name of the Direct Connect connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ connection_id:
+ description:
+ - The ID of the Direct Connect connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+'''
+
+EXAMPLES = '''
+
+# confirm a Direct Connect by name
+- name: confirm the connection by name
+ aws_direct_connect_confirm_connection:
+ name: my_host_direct_connect
+
+# confirm a Direct Connect by connection_id
+- name: confirm the connection id
+ aws_direct_connect_confirm_connection:
+ connection_id: dxcon-xxxxxxxx
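+
+# register the result to inspect the returned connection_state
+- name: confirm the connection and capture its state
+  aws_direct_connect_confirm_connection:
+    connection_id: dxcon-xxxxxxxx
+  register: confirmation
+
+- name: show the state ('pending' immediately after confirmation)
+  debug:
+    var: confirmation.connection_state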
+'''
+
+RETURN = '''
+
+connection_state:
+ description: The state of the connection.
+ returned: always
+ type: str
+ sample: pending
+'''
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry)
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
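+# retry transient DirectConnect API failures: up to 10 tries with exponential
+# backoff, also treating DirectConnectClientException as retryable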
+retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
+
+
+@AWSRetry.backoff(**retry_params)
+def describe_connections(client, params):
+ return client.describe_connections(**params)
+
+
+def find_connection_id(client, connection_id=None, connection_name=None):
+ params = {}
+ if connection_id:
+ params['connectionId'] = connection_id
+ try:
+ response = describe_connections(client, params)
+ except (BotoCoreError, ClientError) as e:
+ if connection_id:
+ msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ else:
+ msg = "Failed to describe DirectConnect connections"
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = []
+ if len(response.get('connections', [])) == 1 and connection_id:
+ if response['connections'][0]['connectionState'] != 'deleted':
+ match.append(response['connections'][0]['connectionId'])
+
+ for conn in response.get('connections', []):
+ if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
+ match.append(conn['connectionId'])
+
+ if len(match) == 1:
+ return match[0]
+ else:
+ raise DirectConnectError(msg="Could not find a valid DirectConnect connection")
+
+
+def get_connection_state(client, connection_id):
+ try:
+ response = describe_connections(client, dict(connectionId=connection_id))
+ return response['connections'][0]['connectionState']
+ except (BotoCoreError, ClientError, IndexError) as e:
+ raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def main():
+ argument_spec = dict(
+ connection_id=dict(),
+ name=dict()
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['connection_id', 'name']],
+ required_one_of=[['connection_id', 'name']])
+ client = module.client('directconnect')
+
+ connection_id = module.params['connection_id']
+ connection_name = module.params['name']
+
+ changed = False
+ connection_state = None
+ try:
+ connection_id = find_connection_id(client,
+ connection_id,
+ connection_name)
+ connection_state = get_connection_state(client, connection_id)
+ if connection_state == 'ordering':
+ client.confirm_connection(connectionId=connection_id)
+ changed = True
+ connection_state = get_connection_state(client, connection_id)
+ except DirectConnectError as e:
+ if e.last_traceback:
+ module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, connection_state=connection_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_connection.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_connection.py
new file mode 100644
index 00000000..a84e5f98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_connection.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_direct_connect_connection
+version_added: 1.0.0
+short_description: Creates, deletes, or modifies a DirectConnect connection
+description:
+ - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
+ Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
+ The connection may later be associated or disassociated with a link aggregation group.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+options:
+ state:
+ description:
+ - The state of the Direct Connect connection.
+ choices:
+ - present
+ - absent
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the Direct Connect connection. This is required to create a
+ new connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ connection_id:
+ description:
+ - The ID of the Direct Connect connection.
+ - Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ location:
+ description:
+ - Where the Direct Connect connection is located.
+ - Required when I(state=present).
+ type: str
+ bandwidth:
+ description:
+ - The bandwidth of the Direct Connect connection.
+ - Required when I(state=present).
+ choices:
+ - 1Gbps
+ - 10Gbps
+ type: str
+ link_aggregation_group:
+ description:
+ - The ID of the link aggregation group you want to associate with the connection.
+ - This is optional when a stand-alone connection is desired.
+ type: str
+ forced_update:
+ description:
+ - To modify I(bandwidth) or I(location) the connection needs to be deleted and recreated.
+      - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location).
+ type: bool
+ default: false
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect connection
+- community.aws.aws_direct_connect_connection:
+ name: ansible-test-connection
+ state: present
+ location: EqDC2
+ link_aggregation_group: dxlag-xxxxxxxx
+ bandwidth: 1Gbps
+ register: dc
+
+# disassociate the LAG from the connection
+- community.aws.aws_direct_connect_connection:
+ state: present
+    connection_id: '{{ dc.connection.connection_id }}'
+ location: EqDC2
+ bandwidth: 1Gbps
+
+# replace the connection with one with more bandwidth
+- community.aws.aws_direct_connect_connection:
+ state: present
+ name: ansible-test-connection
+ location: EqDC2
+ bandwidth: 10Gbps
+ forced_update: true
+
+# delete the connection
+- community.aws.aws_direct_connect_connection:
+ state: absent
+ name: ansible-test-connection
+"""
+
+RETURN = """
+connection:
+ description: The attributes of the direct connect connection.
+ type: complex
+ returned: I(state=present)
+ contains:
+ aws_device:
+ description: The endpoint which the physical connection terminates on.
+ returned: when the requested state is no longer 'requested'
+ type: str
+ sample: EqDC2-12pmo7hemtz1z
+ bandwidth:
+ description: The bandwidth of the connection.
+ returned: always
+ type: str
+ sample: 1Gbps
+ connection_id:
+ description: The ID of the connection.
+ returned: always
+ type: str
+ sample: dxcon-ffy9ywed
+ connection_name:
+ description: The name of the connection.
+ returned: always
+ type: str
+ sample: ansible-test-connection
+ connection_state:
+ description: The state of the connection.
+ returned: always
+ type: str
+ sample: pending
+ loa_issue_time:
+ description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
+ returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
+ type: str
+ sample: '2018-03-20T17:36:26-04:00'
+ location:
+ description: The location of the connection.
+ returned: always
+ type: str
+ sample: EqDC2
+ owner_account:
+ description: The account that owns the direct connect connection.
+ returned: always
+ type: str
+ sample: '123456789012'
+ region:
+ description: The region in which the connection exists.
+ returned: always
+ type: str
+ sample: us-east-1
+"""
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry)
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import (
+ DirectConnectError,
+ delete_connection,
+ associate_connection_and_lag,
+ disassociate_connection_and_lag,
+)
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
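+# shared AWSRetry settings: 10 tries with exponential backoff, with
+# DirectConnectClientException treated as a retryable error code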
+retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
+
+
+def connection_status(client, connection_id):
+ return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
+
+
+def connection_exists(client, connection_id=None, connection_name=None, verify=True):
+ params = {}
+ if connection_id:
+ params['connectionId'] = connection_id
+ try:
+ response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params)
+ except (BotoCoreError, ClientError) as e:
+ if connection_id:
+ msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ else:
+ msg = "Failed to describe DirectConnect connections"
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = []
+ connection = []
+
+ # look for matching connections
+
+ if len(response.get('connections', [])) == 1 and connection_id:
+ if response['connections'][0]['connectionState'] != 'deleted':
+ match.append(response['connections'][0]['connectionId'])
+ connection.extend(response['connections'])
+
+ for conn in response.get('connections', []):
+ if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
+ match.append(conn['connectionId'])
+ connection.append(conn)
+
+    # verify whether the connection exists; if it does, return the connection ID, otherwise return False
+ if verify and len(match) == 1:
+ return match[0]
+ elif verify:
+ return False
+ # not verifying if the connection exists; just return current connection info
+ elif len(connection) == 1:
+ return {'connection': connection[0]}
+ return {'connection': {}}
+
+
+def create_connection(client, location, bandwidth, name, lag_id):
+ if not name:
+ raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
+ params = {
+ 'location': location,
+ 'bandwidth': bandwidth,
+ 'connectionName': name,
+ }
+ if lag_id:
+ params['lagId'] = lag_id
+
+ try:
+ connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params)
+ except (BotoCoreError, ClientError) as e:
+ raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ return connection['connectionId']
+
+
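+# bandwidth and location cannot be modified in place: when either differs from
+# the connection's current state, ensure_present() deletes and recreates the
+# connection, and only does so when forced_update=true was requested.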
+def changed_properties(current_status, location, bandwidth):
+ current_bandwidth = current_status['bandwidth']
+ current_location = current_status['location']
+
+ return current_bandwidth != bandwidth or current_location != location
+
+
+@AWSRetry.backoff(**retry_params)
+def update_associations(client, latest_state, connection_id, lag_id):
+ changed = False
+ if 'lagId' in latest_state and lag_id != latest_state['lagId']:
+ disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
+ changed = True
+ if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
+ associate_connection_and_lag(client, connection_id, lag_id)
+ changed = True
+ return changed
+
+
+def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
+ # the connection is found; get the latest state and see if it needs to be updated
+ if connection_id:
+ latest_state = connection_status(client, connection_id=connection_id)['connection']
+ if changed_properties(latest_state, location, bandwidth) and forced_update:
+ ensure_absent(client, connection_id)
+ return ensure_present(client=client,
+ connection_id=None,
+ connection_name=connection_name,
+ location=location,
+ bandwidth=bandwidth,
+ lag_id=lag_id,
+ forced_update=forced_update)
+ elif update_associations(client, latest_state, connection_id, lag_id):
+ return True, connection_id
+
+ # no connection found; create a new one
+ else:
+ return True, create_connection(client, location, bandwidth, connection_name, lag_id)
+
+ return False, connection_id
+
+
+@AWSRetry.backoff(**retry_params)
+def ensure_absent(client, connection_id):
+ changed = False
+ if connection_id:
+ delete_connection(client, connection_id)
+ changed = True
+
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(),
+ location=dict(),
+ bandwidth=dict(choices=['1Gbps', '10Gbps']),
+ link_aggregation_group=dict(),
+ connection_id=dict(),
+ forced_update=dict(type='bool', default=False)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('connection_id', 'name')],
+ required_if=[('state', 'present', ('location', 'bandwidth'))]
+ )
+
+ connection = module.client('directconnect')
+
+ state = module.params.get('state')
+ try:
+ connection_id = connection_exists(
+ connection,
+ connection_id=module.params.get('connection_id'),
+ connection_name=module.params.get('name')
+ )
+ if not connection_id and module.params.get('connection_id'):
+ module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
+
+ if state == 'present':
+ changed, connection_id = ensure_present(connection,
+ connection_id=connection_id,
+ connection_name=module.params.get('name'),
+ location=module.params.get('location'),
+ bandwidth=module.params.get('bandwidth'),
+ lag_id=module.params.get('link_aggregation_group'),
+ forced_update=module.params.get('forced_update'))
+ response = connection_status(connection, connection_id)
+ elif state == 'absent':
+ changed = ensure_absent(connection, connection_id)
+ response = {}
+ except DirectConnectError as e:
+ if e.last_traceback:
+ module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_gateway.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_gateway.py
new file mode 100644
index 00000000..b34d6c52
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_gateway.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_direct_connect_gateway
+author: Gobin Sougrakpam (@gobins)
+version_added: 1.0.0
+short_description: Manage AWS Direct Connect gateway
+description:
+ - Creates AWS Direct Connect Gateway.
+ - Deletes AWS Direct Connect Gateway.
+ - Attaches Virtual Gateways to Direct Connect Gateway.
+  - Detaches Virtual Gateways from Direct Connect Gateway.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - Set I(state=present) to ensure a resource is created.
+ - Set I(state=absent) to remove a resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the Direct Connect Gateway to be created or deleted.
+ type: str
+ amazon_asn:
+ description:
+ - The Amazon side ASN.
+ - Required when I(state=present).
+ type: str
+ direct_connect_gateway_id:
+ description:
+ - The ID of an existing Direct Connect Gateway.
+ - Required when I(state=absent).
+ type: str
+ virtual_gateway_id:
+ description:
+ - The VPN gateway ID of an existing virtual gateway.
+ type: str
+ wait_timeout:
+ description:
+ - How long to wait for the association to be deleted.
+ type: int
+ default: 320
+'''
+
+EXAMPLES = '''
+- name: Create a new direct connect gateway attached to virtual private gateway
+ community.aws.aws_direct_connect_gateway:
+ state: present
+ name: my-dx-gateway
+ amazon_asn: 7224
+ virtual_gateway_id: vpg-12345
+ register: created_dxgw
+
+- name: Create a new unattached dxgw
+ community.aws.aws_direct_connect_gateway:
+ state: present
+ name: my-dx-gateway
+ amazon_asn: 7224
+ register: created_dxgw
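+
+# A minimal sketch with a placeholder gateway ID: state=absent requires
+# direct_connect_gateway_id, per the option documentation above.
+- name: Delete a direct connect gateway
+  community.aws.aws_direct_connect_gateway:
+    state: absent
+    direct_connect_gateway_id: abcd1234-dcba-5678-be23-a5ed33af465e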
+
+'''
+
+RETURN = '''
+result:
+ description:
+ - The attributes of the Direct Connect Gateway
+ type: complex
+ returned: I(state=present)
+ contains:
+ amazon_side_asn:
+ description: ASN on the amazon side.
+ type: str
+ direct_connect_gateway_id:
+ description: The ID of the direct connect gateway.
+ type: str
+ direct_connect_gateway_name:
+ description: The name of the direct connect gateway.
+ type: str
+ direct_connect_gateway_state:
+ description: The state of the direct connect gateway.
+ type: str
+ owner_account:
+ description: The AWS account ID of the owner of the direct connect gateway.
+ type: str
+'''
+
+import time
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def dx_gateway_info(client, gateway_id, module):
+ try:
+ resp = client.describe_direct_connect_gateways(
+ directConnectGatewayId=gateway_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ if resp['directConnectGateways']:
+ return resp['directConnectGateways'][0]
+
+
+def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
+ polling_increment_secs = 15
+ max_retries = 3
+ status_achieved = False
+
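+    # poll the association state every polling_increment_secs seconds, up to
+    # max_retries checks (roughly 45 seconds in total), before giving up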
+ for x in range(0, max_retries):
+ try:
+ response = check_dxgw_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=virtual_gateway_id)
+ if response['directConnectGatewayAssociations']:
+ if response['directConnectGatewayAssociations'][0]['associationState'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ else:
+ status_achieved = True
+ break
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return status_achieved, result
+
+
+def associate_direct_connect_gateway(client, module, gateway_id):
+ params = dict()
+ params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+ try:
+ response = client.create_direct_connect_gateway_association(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=params['virtual_gateway_id'])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
+
+ result = response
+ return result
+
+
+def delete_association(client, module, gateway_id, virtual_gateway_id):
+ try:
+ response = client.delete_direct_connect_gateway_association(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=virtual_gateway_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console')
+
+ result = response
+ return result
+
+
+def create_dx_gateway(client, module):
+ params = dict()
+ params['name'] = module.params.get('name')
+ params['amazon_asn'] = module.params.get('amazon_asn')
+ try:
+ response = client.create_direct_connect_gateway(
+ directConnectGatewayName=params['name'],
+ amazonSideAsn=int(params['amazon_asn']))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return result
+
+
+def find_dx_gateway(client, module, gateway_id=None):
+ params = dict()
+ gateways = list()
+ if gateway_id is not None:
+ params['directConnectGatewayId'] = gateway_id
+ while True:
+ try:
+ resp = client.describe_direct_connect_gateways(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ gateways.extend(resp['directConnectGateways'])
+ if 'nextToken' in resp:
+ params['nextToken'] = resp['nextToken']
+ else:
+ break
+    for gateway in gateways:
+        # when an ID was supplied the API has already filtered the results;
+        # otherwise match on the gateway name
+        if gateway_id or module.params.get('name') == gateway['directConnectGatewayName']:
+            return gateway
+    return None
+
+
+def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
+ try:
+ if virtual_gateway_id is None:
+ resp = client.describe_direct_connect_gateway_associations(
+ directConnectGatewayId=gateway_id
+ )
+ else:
+ resp = client.describe_direct_connect_gateway_associations(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=virtual_gateway_id,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ return resp
+
+
+def ensure_present(client, module):
+ # If an existing direct connect gateway matches our args
+ # then a match is considered to have been found and we will not create another dxgw.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['name'] = module.params.get('name')
+ params['amazon_asn'] = module.params.get('amazon_asn')
+ params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+
+ # check if a gateway matching our module args already exists
+ existing_dxgw = find_dx_gateway(client, module)
+
+ if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
+ gateway_id = existing_dxgw['directConnectGatewayId']
+        # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
+ if params['virtual_gateway_id']:
+ resp = check_dxgw_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=params['virtual_gateway_id'])
+ if not resp["directConnectGatewayAssociations"]:
+ # attach the dxgw to the supplied virtual_gateway_id
+ associate_direct_connect_gateway(client, module, gateway_id)
+ changed = True
+        # if params['virtual_gateway_id'] is not provided, check whether the dxgw is attached to a VPG and, if so, detach it
+ else:
+ existing_dxgw = find_dx_gateway(client, module)
+
+ resp = check_dxgw_association(client, module, gateway_id=gateway_id)
+ if resp["directConnectGatewayAssociations"]:
+ for association in resp['directConnectGatewayAssociations']:
+ if association['associationState'] not in ['disassociating', 'disassociated']:
+ delete_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=association['virtualGatewayId'])
+ else:
+ # create a new dxgw
+ new_dxgw = create_dx_gateway(client, module)
+ changed = True
+ gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
+
+        # if a virtual_gateway_id was supplied, attempt to attach it to the dxgw
+ if params['virtual_gateway_id']:
+ associate_direct_connect_gateway(client, module, gateway_id)
+ resp = check_dxgw_association(client,
+ module,
+ gateway_id=gateway_id
+ )
+ if resp["directConnectGatewayAssociations"]:
+ changed = True
+
+ result = dx_gateway_info(client, gateway_id, module)
+ return changed, result
+
+
+def ensure_absent(client, module):
+    # Find the direct connect gateway by ID; if it exists, remove any virtual
+    # gateway associations (waiting for them to be deleted) and then delete the gateway.
+
+ changed = False
+ result = dict()
+ dx_gateway_id = module.params.get('direct_connect_gateway_id')
+ existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
+ if existing_dxgw is not None:
+ resp = check_dxgw_association(client, module,
+ gateway_id=dx_gateway_id)
+ if resp["directConnectGatewayAssociations"]:
+ for association in resp['directConnectGatewayAssociations']:
+ if association['associationState'] not in ['disassociating', 'disassociated']:
+ delete_association(client, module,
+ gateway_id=dx_gateway_id,
+ virtual_gateway_id=association['virtualGatewayId'])
+ # wait for deleting association
+ timeout = time.time() + module.params.get('wait_timeout')
+ while time.time() < timeout:
+ resp = check_dxgw_association(client,
+ module,
+ gateway_id=dx_gateway_id)
+ if resp["directConnectGatewayAssociations"] != []:
+ time.sleep(15)
+ else:
+ break
+
+ try:
+ resp = client.delete_direct_connect_gateway(
+ directConnectGatewayId=dx_gateway_id
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        result = resp['directConnectGateway']
+        changed = True
+    return changed
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(),
+ amazon_asn=dict(),
+ virtual_gateway_id=dict(),
+ direct_connect_gateway_id=dict(),
+ wait_timeout=dict(type='int', default=320),
+ )
+ required_if = [('state', 'present', ['name', 'amazon_asn']),
+ ('state', 'absent', ['direct_connect_gateway_id'])]
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=required_if)
+
+ state = module.params.get('state')
+
+ try:
+ client = module.client('directconnect')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ (changed, results) = ensure_present(client, module)
+ elif state == 'absent':
+ changed = ensure_absent(client, module)
+ results = {}
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_link_aggregation_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_link_aggregation_group.py
new file mode 100644
index 00000000..41c50134
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_link_aggregation_group.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_direct_connect_link_aggregation_group
+version_added: 1.0.0
+short_description: Manage Direct Connect LAG bundles
+description:
+ - Create, delete, or modify a Direct Connect link aggregation group.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+options:
+ state:
+ description:
+ - The state of the Direct Connect link aggregation group.
+ choices:
+ - present
+ - absent
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the Direct Connect link aggregation group.
+ type: str
+ link_aggregation_group_id:
+ description:
+ - The ID of the Direct Connect link aggregation group.
+ type: str
+ num_connections:
+ description:
+ - The number of connections with which to initialize the link aggregation group.
+ type: int
+ min_links:
+ description:
+ - The minimum number of physical connections that must be operational for the LAG itself to be operational.
+ type: int
+ location:
+ description:
+ - The location of the link aggregation group.
+ type: str
+ bandwidth:
+ description:
+ - The bandwidth of the link aggregation group.
+ type: str
+ force_delete:
+ description:
+      - This allows the minimum number of links to be set to 0, any hosted connections disassociated,
+        and any virtual interfaces associated with the LAG deleted.
+ type: bool
+ default: false
+ connection_id:
+ description:
+ - A connection ID to link with the link aggregation group upon creation.
+ type: str
+ delete_with_disassociation:
+ description:
+ - To be used with I(state=absent) to delete connections after disassociating them with the LAG.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Whether or not to wait for the operation to complete.
+ - May be useful when waiting for virtual interfaces to be deleted.
+ - The time to wait can be controlled by setting I(wait_timeout).
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - The duration in seconds to wait if I(wait=true).
+ default: 120
+ type: int
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect link aggregation group
+- community.aws.aws_direct_connect_link_aggregation_group:
+    state: present
+    location: EqDC2
+    name: ansible-test-lag
+    bandwidth: 1Gbps
+
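+# a sketch built only from the options documented above (placeholder group ID):
+# force-delete the LAG, deleting its connections after disassociation and
+# waiting while its virtual interfaces are removed
+- community.aws.aws_direct_connect_link_aggregation_group:
+    state: absent
+    link_aggregation_group_id: dxlag-xxxxxxxx
+    force_delete: true
+    delete_with_disassociation: true
+    wait: true
+    wait_timeout: 300
+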
+"""
+
+RETURN = """
+changed:
+  type: bool
+ description: Whether or not the LAG has changed.
+ returned: always
+aws_device:
+ type: str
+ description: The AWS Direct Connection endpoint that hosts the LAG.
+ sample: "EqSe2-1bwfvazist2k0"
+ returned: when I(state=present)
+connections:
+ type: list
+ description: A list of connections bundled by this LAG.
+ sample:
+ "connections": [
+ {
+ "aws_device": "EqSe2-1bwfvazist2k0",
+ "bandwidth": "1Gbps",
+ "connection_id": "dxcon-fgzjah5a",
+ "connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
+ "connection_state": "down",
+ "lag_id": "dxlag-fgnsp4rq",
+ "location": "EqSe2",
+ "owner_account": "448830907657",
+ "region": "us-west-2"
+ }
+ ]
+ returned: when I(state=present)
+connections_bandwidth:
+ type: str
+ description: The individual bandwidth of the physical connections bundled by the LAG.
+ sample: "1Gbps"
+ returned: when I(state=present)
+lag_id:
+ type: str
+ description: Unique identifier for the link aggregation group.
+ sample: "dxlag-fgnsp4rq"
+ returned: when I(state=present)
+lag_name:
+ type: str
+ description: User-provided name for the link aggregation group.
+ returned: when I(state=present)
+lag_state:
+ type: str
+ description: State of the LAG.
+ sample: "pending"
+ returned: when I(state=present)
+location:
+ type: str
+ description: Where the connection is located.
+ sample: "EqSe2"
+ returned: when I(state=present)
+minimum_links:
+ type: int
+ description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
+ returned: when I(state=present)
+number_of_connections:
+ type: int
+ description: The number of physical connections bundled by the LAG.
+ returned: when I(state=present)
+owner_account:
+ type: str
+ description: Owner account ID of the LAG.
+ returned: when I(state=present)
+region:
+ type: str
+ description: The region in which the LAG exists.
+ returned: when I(state=present)
+"""
+
+import traceback
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag
+
+
+def lag_status(client, lag_id):
+ return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
+
+
+def lag_exists(client, lag_id=None, lag_name=None, verify=True):
+ """ If verify=True, returns the LAG ID or None
+ If verify=False, returns the LAG's data (or an empty dict)
+ """
+ try:
+ if lag_id:
+ response = client.describe_lags(lagId=lag_id)
+ else:
+ response = client.describe_lags()
+ except botocore.exceptions.ClientError as e:
+ if lag_id and verify:
+ return False
+ elif lag_id:
+ return {}
+ else:
+ failed_op = "Failed to describe DirectConnect link aggregation groups."
+ raise DirectConnectError(msg=failed_op,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = [] # List of LAG IDs that are exact matches
+ lag = [] # List of LAG data that are exact matches
+
+ # look for matching connections
+ if len(response.get('lags', [])) == 1 and lag_id:
+ if response['lags'][0]['lagState'] != 'deleted':
+ match.append(response['lags'][0]['lagId'])
+ lag.append(response['lags'][0])
+ else:
+ for each in response.get('lags', []):
+ if each['lagState'] != 'deleted':
+ if not lag_id:
+ if lag_name == each['lagName']:
+ match.append(each['lagId'])
+ else:
+ match.append(each['lagId'])
+
+    # verify whether the LAG exists; if it does, return the LAG ID, otherwise return False
+ if verify and len(match) == 1:
+ return match[0]
+ elif verify:
+ return False
+
+ # not verifying if the connection exists; just return current connection info
+ else:
+ if len(lag) == 1:
+ return lag[0]
+ else:
+ return {}
+
+
+def create_lag(client, num_connections, location, bandwidth, name, connection_id):
+ if not name:
+ raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
+ last_traceback=None,
+ exception="")
+
+ parameters = dict(numberOfConnections=num_connections,
+ location=location,
+ connectionsBandwidth=bandwidth,
+ lagName=name)
+ if connection_id:
+ parameters.update(connectionId=connection_id)
+ try:
+ lag = client.create_lag(**parameters)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ return lag['lagId']
+
+
+def delete_lag(client, lag_id):
+ try:
+ client.delete_lag(lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
+def _update_lag(client, lag_id, lag_name, min_links):
+ params = {}
+ if min_links:
+ params.update(minimumLinks=min_links)
+ if lag_name:
+ params.update(lagName=lag_name)
+
+ client.update_lag(lagId=lag_id, **params)
+
+
+def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
+ start = time.time()
+
+ if min_links and min_links > num_connections:
+ raise DirectConnectError(
+ msg="The number of connections {0} must be greater than the minimum number of links "
+ "{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
+ last_traceback=None,
+ exception=None
+ )
+
+ while True:
+ try:
+ _update_lag(client, lag_id, lag_name, min_links)
+ except botocore.exceptions.ClientError as e:
+ if wait and time.time() - start <= wait_timeout:
+ continue
+ msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
+ if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
+ msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ else:
+ break
+
+
+def lag_changed(current_status, name, min_links):
+ """ Determines if a modifiable link aggregation group attribute has been modified. """
+ return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
+
+
+def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
+ exists = lag_exists(client, lag_id, lag_name)
+ if not exists and lag_id:
+ raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
+ last_traceback=None,
+ exception="")
+
+    # the LAG exists; get the latest state and see if it needs to be updated
+ if exists:
+ lag_id = exists
+ latest_state = lag_status(client, lag_id)
+ if lag_changed(latest_state, lag_name, min_links):
+ update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
+ return True, lag_id
+ return False, lag_id
+
+    # no LAG found; create a new one
+ else:
+ lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
+ update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
+ return True, lag_id
+
+
+def describe_virtual_interfaces(client, lag_id):
+ try:
+ response = client.describe_virtual_interfaces(connectionId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ return response.get('virtualInterfaces', [])
+
+
+def get_connections_and_virtual_interfaces(client, lag_id):
+ virtual_interfaces = describe_virtual_interfaces(client, lag_id)
+ connections = lag_status(client, lag_id=lag_id).get('connections', [])
+ return virtual_interfaces, connections
+
+
+def disassociate_vis(client, lag_id, virtual_interfaces):
+    for vi in virtual_interfaces:
+        # the LAG cannot be removed while virtual interfaces exist, so delete each one
+        try:
+            client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId'])
+        except botocore.exceptions.ClientError as e:
+            raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id),
+                                     last_traceback=traceback.format_exc(),
+                                     exception=e)
+
+
+def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
+ lag_id = lag_exists(client, lag_id, lag_name)
+ if not lag_id:
+ return False
+
+ latest_status = lag_status(client, lag_id)
+
+ # determine the associated connections and virtual interfaces to disassociate
+ virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
+
+ # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
+ if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
+ raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
+ "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
+ "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
+ "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
+ last_traceback=None,
+ exception=None)
+
+ # update min_links to be 0 so we can remove the LAG
+ update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
+
+    # disassociate all connections from the LAG, optionally deleting each one after disassociation
+ for connection in connections:
+ disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
+ if delete_with_disassociation:
+ delete_connection(client, connection['connectionId'])
+
+ for vi in virtual_interfaces:
+ delete_virtual_interface(client, vi['virtualInterfaceId'])
+
+ start_time = time.time()
+    while True:
+        try:
+            delete_lag(client, lag_id)
+        except DirectConnectError as e:
+            # keep retrying only while the LAG's virtual interfaces are still being deleted
+            if wait and (time.time() - start_time < wait_timeout) and 'until its Virtual Interfaces are deleted' in str(e.exception):
+                continue
+            raise
+        else:
+            return True
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(),
+ link_aggregation_group_id=dict(),
+ num_connections=dict(type='int'),
+ min_links=dict(type='int'),
+ location=dict(),
+ bandwidth=dict(),
+ connection_id=dict(),
+ delete_with_disassociation=dict(type='bool', default=False),
+ force_delete=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=120),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('link_aggregation_group_id', 'name')],
+ required_if=[('state', 'present', ('location', 'bandwidth'))],
+ )
+
+ try:
+ connection = module.client('directconnect')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ response = {}
+ try:
+ if state == 'present':
+ changed, lag_id = ensure_present(connection,
+ num_connections=module.params.get("num_connections"),
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ location=module.params.get("location"),
+ bandwidth=module.params.get("bandwidth"),
+ connection_id=module.params.get("connection_id"),
+ min_links=module.params.get("min_links"),
+ wait=module.params.get("wait"),
+ wait_timeout=module.params.get("wait_timeout"))
+ response = lag_status(connection, lag_id)
+ elif state == "absent":
+ changed = ensure_absent(connection,
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ force_delete=module.params.get("force_delete"),
+ delete_with_disassociation=module.params.get("delete_with_disassociation"),
+ wait=module.params.get('wait'),
+ wait_timeout=module.params.get('wait_timeout'))
+ except DirectConnectError as e:
+ if e.last_traceback:
+ module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_virtual_interface.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_virtual_interface.py
new file mode 100644
index 00000000..ba8391a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_direct_connect_virtual_interface.py
@@ -0,0 +1,515 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_direct_connect_virtual_interface
+version_added: 1.0.0
+short_description: Manage Direct Connect virtual interfaces
+description:
+ - Create, delete, or modify a Direct Connect public or private virtual interface.
+author: "Sloane Hertel (@s-hertel)"
+requirements:
+ - boto3
+ - botocore
+options:
+ state:
+ description:
+ - The desired state of the Direct Connect virtual interface.
+ choices: [present, absent]
+ type: str
+ required: true
+ id_to_associate:
+ description:
+ - The ID of the link aggregation group or connection to associate with the virtual interface.
+ aliases: [link_aggregation_group_id, connection_id]
+ type: str
+ required: true
+ public:
+ description:
+ - The type of virtual interface.
+ type: bool
+ name:
+ description:
+ - The name of the virtual interface.
+ type: str
+ vlan:
+ description:
+ - The VLAN ID.
+ default: 100
+ type: int
+ bgp_asn:
+ description:
+ - The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ default: 65000
+ type: int
+ authentication_key:
+ description:
+ - The authentication key for BGP configuration.
+ type: str
+ amazon_address:
+ description:
+ - The amazon address CIDR with which to create the virtual interface.
+ type: str
+ customer_address:
+ description:
+ - The customer address CIDR with which to create the virtual interface.
+ type: str
+ address_type:
+ description:
+ - The type of IP address for the BGP peer.
+ type: str
+ cidr:
+ description:
+ - A list of route filter prefix CIDRs with which to create the public virtual interface.
+ type: list
+ elements: str
+ virtual_gateway_id:
+ description:
+ - The virtual gateway ID required for creating a private virtual interface.
+ - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required.
+ These options are mutually exclusive.
+ type: str
+ direct_connect_gateway_id:
+ description:
+ - The direct connect gateway ID for creating a private virtual interface.
+ - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required.
+ These options are mutually exclusive.
+ type: str
+ virtual_interface_id:
+ description:
+ - The virtual interface ID.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+RETURN = r'''
+address_family:
+ description: The address family for the BGP peer.
+ returned: always
+ type: str
+ sample: ipv4
+amazon_address:
+ description: IP address assigned to the Amazon interface.
+ returned: always
+ type: str
+ sample: 169.254.255.1/30
+asn:
+ description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ returned: always
+ type: int
+ sample: 65000
+auth_key:
+ description: The authentication key for BGP configuration.
+ returned: always
+ type: str
+ sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
+bgp_peers:
+ description: A list of the BGP peers configured on this virtual interface.
+ returned: always
+ type: complex
+ contains:
+ address_family:
+ description: The address family for the BGP peer.
+ returned: always
+ type: str
+ sample: ipv4
+ amazon_address:
+ description: IP address assigned to the Amazon interface.
+ returned: always
+ type: str
+ sample: 169.254.255.1/30
+ asn:
+ description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ returned: always
+ type: int
+ sample: 65000
+ auth_key:
+ description: The authentication key for BGP configuration.
+ returned: always
+ type: str
+ sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
+ bgp_peer_state:
+ description: The state of the BGP peer (verifying, pending, available)
+ returned: always
+ type: str
+ sample: available
+ bgp_status:
+ description: The up/down state of the BGP peer.
+ returned: always
+ type: str
+ sample: up
+ customer_address:
+ description: IP address assigned to the customer interface.
+ returned: always
+ type: str
+ sample: 169.254.255.2/30
+changed:
+  description: Indicates whether the virtual interface has been created, modified, or deleted.
+ returned: always
+ type: bool
+ sample: false
+connection_id:
+ description:
+ - The ID of the connection. This field is also used as the ID type for operations that
+ use multiple connection types (LAG, interconnect, and/or connection).
+ returned: always
+ type: str
+ sample: dxcon-fgb175av
+customer_address:
+ description: IP address assigned to the customer interface.
+ returned: always
+ type: str
+ sample: 169.254.255.2/30
+customer_router_config:
+ description: Information for generating the customer router configuration.
+ returned: always
+ type: str
+location:
+ description: Where the connection is located.
+ returned: always
+ type: str
+ sample: EqDC2
+owner_account:
+ description: The AWS account that will own the new virtual interface.
+ returned: always
+ type: str
+ sample: '123456789012'
+route_filter_prefixes:
+ description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
+ returned: always
+ type: complex
+ contains:
+ cidr:
+      description: A route to be advertised to the AWS network in this region.
+ returned: always
+ type: str
+ sample: 54.227.92.216/30
+virtual_gateway_id:
+ description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
+ returned: when I(public=False)
+ type: str
+ sample: vgw-f3ce259a
+direct_connect_gateway_id:
+ description: The ID of the Direct Connect gateway. This only applies to private virtual interfaces.
+ returned: when I(public=False)
+ type: str
+ sample: f7593767-eded-44e8-926d-a2234175835d
+virtual_interface_id:
+ description: The ID of the virtual interface.
+ returned: always
+ type: str
+ sample: dxvif-fh0w7cex
+virtual_interface_name:
+ description: The name of the virtual interface assigned by the customer.
+ returned: always
+ type: str
+ sample: test_virtual_interface
+virtual_interface_state:
+ description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
+ returned: always
+ type: str
+ sample: available
+virtual_interface_type:
+ description: The type of virtual interface (private, public).
+ returned: always
+ type: str
+ sample: private
+vlan:
+ description: The VLAN ID.
+ returned: always
+ type: int
+ sample: 100
+'''
+
+EXAMPLES = r'''
+---
+- name: create a private virtual interface on a link aggregation group
+  community.aws.aws_direct_connect_virtual_interface:
+    state: present
+    public: false
+    name: "{{ name }}"
+    link_aggregation_group_id: dxlag-xxxxxxxx
+    virtual_gateway_id: vgw-xxxxxxxx
+
+- name: delete a virtual interface associated with a connection
+  community.aws.aws_direct_connect_virtual_interface:
+    state: absent
+    connection_id: dxcon-XXXXXXXX
+    virtual_interface_id: dxvif-XXXXXXXX
+
+'''
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError, delete_virtual_interface
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+
+def try_except_ClientError(failure_msg):
+ '''
+ Wrapper for boto3 calls that uses AWSRetry and handles exceptions
+ '''
+ def wrapper(f):
+ def run_func(*args, **kwargs):
+ try:
+ result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
+ except (ClientError, BotoCoreError) as e:
+ raise DirectConnectError(failure_msg, traceback.format_exc(), e)
+ return result
+ return run_func
+ return wrapper
+
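+# Usage sketch (the describe call and placeholder ID are illustrative): the
+# wrapper is applied per boto3 call, e.g.
+#   describe = try_except_ClientError("Failed to describe virtual interface")(
+#       client.describe_virtual_interfaces)
+#   response = describe(virtualInterfaceId='dxvif-xxxxxxxx')
+# Throttled calls and 'DirectConnectClientException' errors are retried with
+# backoff; any other client error is re-raised as a DirectConnectError.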
+
+def find_unique_vi(client, connection_id, virtual_interface_id, name):
+ '''
+ Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
+ If multiple matches are found False is returned. If no matches are found None is returned.
+ '''
+
+ # Get the virtual interfaces, filtering by the ID if provided.
+ vi_params = {}
+ if virtual_interface_id:
+ vi_params = {'virtualInterfaceId': virtual_interface_id}
+
+ virtual_interfaces = try_except_ClientError(
+ failure_msg="Failed to describe virtual interface")(
+ client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
+
+ # Remove deleting/deleted matches from the results.
+ virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
+
+ matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
+ return exact_match(matching_virtual_interfaces)
+
+
+def exact_match(virtual_interfaces):
+    '''
+    Returns the virtual interface ID if exactly one match was found,
+    None if no virtual interface was found (one will need to be created),
+    and False if multiple matches were found
+    '''
+
+ if not virtual_interfaces:
+ return None
+ if len(virtual_interfaces) == 1:
+ return virtual_interfaces[0]['virtualInterfaceId']
+ else:
+ return False
+
+
+def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
+ '''
+ Filters the available virtual interfaces to try to find a unique match
+ '''
+ # Filter by name if provided.
+ if name:
+ matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
+ if len(matching_by_name) == 1:
+ return matching_by_name
+ else:
+ matching_by_name = virtual_interfaces
+
+ # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated)
+ if connection_id and len(matching_by_name) > 1:
+ matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id)
+ if len(matching_by_connection_id) == 1:
+ return matching_by_connection_id
+ else:
+ matching_by_connection_id = matching_by_name
+
+ return matching_by_connection_id
+
+
+def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
+ '''
+ Return virtual interfaces that have the connection_id associated
+ '''
+ return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id]
+
+
+def find_virtual_interface_by_name(virtual_interfaces, name):
+ '''
+ Return virtual interfaces that match the provided name
+ '''
+ return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name]
+
+
+def vi_state(client, virtual_interface_id):
+ '''
+ Returns the state of the virtual interface.
+ '''
+ err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
+ vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id)
+ return vi['virtualInterfaces'][0]
+
+
+def assemble_params_for_creating_vi(params):
+ '''
+ Returns kwargs to use in the call to create the virtual interface
+
+ Params for public virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
+ Params for private virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
+ '''
+
+ public = params['public']
+ name = params['name']
+ vlan = params['vlan']
+ bgp_asn = params['bgp_asn']
+ auth_key = params['authentication_key']
+ amazon_addr = params['amazon_address']
+ customer_addr = params['customer_address']
+ family_addr = params['address_type']
+ cidr = params['cidr']
+ virtual_gateway_id = params['virtual_gateway_id']
+ direct_connect_gateway_id = params['direct_connect_gateway_id']
+
+ parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn)
+ opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr)
+
+ for name, value in opt_params.items():
+ if value:
+ parameters[name] = value
+
+ # virtual interface type specific parameters
+ if public and cidr:
+ parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr]
+ if not public:
+ if virtual_gateway_id:
+ parameters['virtualGatewayId'] = virtual_gateway_id
+ elif direct_connect_gateway_id:
+ parameters['directConnectGatewayId'] = direct_connect_gateway_id
+
+ return parameters
+
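+# Shape sketch (illustrative values, not module output) of the dict assembled
+# above for a private virtual interface:
+#   {'virtualInterfaceName': 'vi-name', 'vlan': 100, 'asn': 65000,
+#    'virtualGatewayId': 'vgw-xxxxxxxx'}
+# create_vi() below passes this dict as newPrivateVirtualInterface.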
+
+def create_vi(client, public, associated_id, creation_params):
+ '''
+ :param public: a boolean
+ :param associated_id: a link aggregation group ID or connection ID to associate
+ with the virtual interface.
+ :param creation_params: a dict of parameters to use in the boto call
+ :return The ID of the created virtual interface
+ '''
+ err_msg = "Failed to create virtual interface"
+ if public:
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id,
+ newPublicVirtualInterface=creation_params)
+ else:
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id,
+ newPrivateVirtualInterface=creation_params)
+ return vi['virtualInterfaceId']
+
+
+def modify_vi(client, virtual_interface_id, connection_id):
+ '''
+ Associate a new connection ID
+ '''
+ err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
+ try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id,
+ connectionId=connection_id)
+
+
+def needs_modification(client, virtual_interface_id, connection_id):
+ '''
+ Determine if the associated connection ID needs to be updated
+ '''
+ return vi_state(client, virtual_interface_id).get('connectionId') != connection_id
+
+
+def ensure_state(connection, module):
+ changed = False
+
+ state = module.params['state']
+ connection_id = module.params['id_to_associate']
+ public = module.params['public']
+ name = module.params['name']
+
+ virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)
+
+ if virtual_interface_id is False:
+ module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
+ "and connection_id options if applicable to find a unique match.")
+
+ if state == 'present':
+
+ if not virtual_interface_id and module.params['virtual_interface_id']:
+ module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
+
+ elif not virtual_interface_id:
+ assembled_params = assemble_params_for_creating_vi(module.params)
+ virtual_interface_id = create_vi(connection, public, connection_id, assembled_params)
+ changed = True
+
+ if needs_modification(connection, virtual_interface_id, connection_id):
+ modify_vi(connection, virtual_interface_id, connection_id)
+ changed = True
+
+ latest_state = vi_state(connection, virtual_interface_id)
+
+ else:
+ if virtual_interface_id:
+ delete_virtual_interface(connection, virtual_interface_id)
+ changed = True
+
+ latest_state = {}
+
+ return changed, latest_state
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
+ public=dict(type='bool'),
+ name=dict(),
+ vlan=dict(type='int', default=100),
+ bgp_asn=dict(type='int', default=65000),
+ authentication_key=dict(),
+ amazon_address=dict(),
+ customer_address=dict(),
+ address_type=dict(),
+ cidr=dict(type='list', elements='str'),
+ virtual_gateway_id=dict(),
+ direct_connect_gateway_id=dict(),
+ virtual_interface_id=dict()
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_one_of=[['virtual_interface_id', 'name']],
+ required_if=[['state', 'present', ['public']],
+ ['public', True, ['amazon_address']],
+ ['public', True, ['customer_address']],
+ ['public', True, ['cidr']]],
+ mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']])
+
+ connection = module.client('directconnect')
+
+ try:
+ changed, latest_state = ensure_state(connection, module)
+ except DirectConnectError as e:
+ if e.exception:
+ module.fail_json_aws(exception=e.exception, msg=e.msg)
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_eks_cluster.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_eks_cluster.py
new file mode 100644
index 00000000..d6df1609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_eks_cluster.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_eks_cluster
+version_added: 1.0.0
+short_description: Manage Elastic Kubernetes Service Clusters
+description:
+ - Manage Elastic Kubernetes Service Clusters
+
+author: Will Thames (@willthames)
+
+options:
+ name:
+ description: Name of EKS cluster
+ required: True
+ type: str
+ version:
+ description: Kubernetes version - defaults to latest
+ type: str
+ role_arn:
+ description: ARN of IAM role used by the EKS cluster
+ type: str
+ subnets:
+ description: list of subnet IDs for the Kubernetes cluster
+ type: list
+ elements: str
+ security_groups:
+ description: list of security group names or IDs
+ type: list
+ elements: str
+ state:
+ description: desired state of the EKS cluster
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ wait:
+ description: >-
+ Specifies whether the module waits until the cluster is active or deleted
+ before moving on. It takes "usually less than 10 minutes" per AWS documentation.
+ type: bool
+ default: false
+ wait_timeout:
+ description: >-
+ The duration in seconds to wait for the cluster to become active. Defaults
+ to 1200 seconds (20 minutes).
+ default: 1200
+ type: int
+
+requirements: [ 'botocore', 'boto3' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create an EKS cluster
+ community.aws.aws_eks_cluster:
+ name: my_cluster
+ version: 1.14
+ role_arn: my_eks_role
+ subnets:
+ - subnet-aaaa1111
+ security_groups:
+ - my_eks_sg
+ - sg-abcd1234
+ register: caller_facts
+
+- name: Remove an EKS cluster
+ community.aws.aws_eks_cluster:
+ name: my_cluster
+ wait: yes
+ state: absent
+'''
+
+RETURN = r'''
+arn:
+ description: ARN of the EKS cluster
+ returned: when state is present
+ type: str
+ sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster
+certificate_authority:
+ description: Dictionary containing Certificate Authority Data for cluster
+ returned: after creation
+ type: complex
+ contains:
+ data:
+ description: Base-64 encoded Certificate Authority Data for cluster
+ returned: when the cluster has been created and is active
+ type: str
+endpoint:
+ description: Kubernetes API server endpoint
+ returned: when the cluster has been created and is active
+ type: str
+ sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com
+created_at:
+ description: Cluster creation date and time
+ returned: when state is present
+ type: str
+ sample: '2018-06-06T11:56:56.242000+00:00'
+name:
+ description: EKS cluster name
+ returned: when state is present
+ type: str
+ sample: my-eks-cluster
+resources_vpc_config:
+ description: VPC configuration of the cluster
+ returned: when state is present
+ type: complex
+ contains:
+ security_group_ids:
+ description: List of security group IDs
+ returned: always
+ type: list
+ sample:
+ - sg-abcd1234
+ - sg-aaaa1111
+ subnet_ids:
+ description: List of subnet IDs
+ returned: always
+ type: list
+ sample:
+ - subnet-abcdef12
+ - subnet-345678ab
+ - subnet-cdef1234
+ vpc_id:
+ description: VPC id
+ returned: always
+ type: str
+ sample: vpc-a1b2c3d4
+role_arn:
+ description: ARN of the IAM role used by the cluster
+ returned: when state is present
+ type: str
+ sample: arn:aws:iam::111111111111:role/aws_eks_cluster_role
+status:
+ description: status of the EKS cluster
+ returned: when state is present
+ type: str
+ sample:
+ - CREATING
+ - ACTIVE
+version:
+ description: Kubernetes version of the cluster
+ returned: when state is present
+ type: str
+ sample: '1.10'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def ensure_present(client, module):
+ name = module.params.get('name')
+ subnets = module.params['subnets']
+ groups = module.params['security_groups']
+ wait = module.params.get('wait')
+ cluster = get_cluster(client, module)
+ try:
+ ec2 = module.client('ec2')
+ vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't lookup security groups")
+
+ if cluster:
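+        # this module cannot modify an existing cluster's subnets, security
+        # groups, or version, so fail loudly rather than ignore the request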
+ if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets):
+ module.fail_json(msg="Cannot modify subnets of existing cluster")
+ if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups):
+ module.fail_json(msg="Cannot modify security groups of existing cluster")
+ if module.params.get('version') and module.params.get('version') != cluster['version']:
+ module.fail_json(msg="Cannot modify version of existing cluster")
+
+ if wait:
+ wait_until(client, module, 'cluster_active')
+ # Ensure that fields that are only available for active clusters are
+ # included in the returned value
+ cluster = get_cluster(client, module)
+
+ module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ try:
+ params = dict(name=name,
+ roleArn=module.params['role_arn'],
+ resourcesVpcConfig=dict(
+ subnetIds=subnets,
+ securityGroupIds=groups),
+ clientRequestToken='ansible-create-%s' % name)
+ if module.params['version']:
+ params['version'] = module.params['version']
+ cluster = client.create_cluster(**params)['cluster']
+    except botocore.exceptions.EndpointConnectionError:
+        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
+
+ if wait:
+ wait_until(client, module, 'cluster_active')
+ # Ensure that fields that are only available for active clusters are
+ # included in the returned value
+ cluster = get_cluster(client, module)
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
+
+
+def ensure_absent(client, module):
+ name = module.params.get('name')
+ existing = get_cluster(client, module)
+ wait = module.params.get('wait')
+ if not existing:
+ module.exit_json(changed=False)
+ if not module.check_mode:
+ try:
+ client.delete_cluster(name=module.params['name'])
+        except botocore.exceptions.EndpointConnectionError:
+            module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name)
+
+ if wait:
+ wait_until(client, module, 'cluster_deleted')
+
+ module.exit_json(changed=True)
+
+
+def get_cluster(client, module):
+ name = module.params.get('name')
+ try:
+ return client.describe_cluster(name=name)['cluster']
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+    except botocore.exceptions.EndpointConnectionError:  # pylint: disable=duplicate-except
+        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
+
+
+def wait_until(client, module, waiter_name='cluster_active'):
+ name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
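+    # Convert the configured timeout (in seconds) into a waiter attempt count using the waiter's built-in per-attempt delay.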
+ waiter = get_waiter(client, waiter_name)
+ attempts = 1 + int(wait_timeout / waiter.config.delay)
+ waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ version=dict(),
+ role_arn=dict(),
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1200, type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
+ supports_check_mode=True,
+ )
+
+ if not module.botocore_at_least("1.10.32"):
+ module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32')
+
+ if (not module.botocore_at_least("1.12.38") and
+ module.params.get('state') == 'absent' and
+ module.params.get('wait')):
+ module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')
+
+ client = module.client('eks')
+
+ if module.params.get('state') == 'present':
+ ensure_present(client, module)
+ else:
+ ensure_absent(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_elasticbeanstalk_app.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_elasticbeanstalk_app.py
new file mode 100644
index 00000000..bab889f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_elasticbeanstalk_app.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_elasticbeanstalk_app
+version_added: 1.0.0
+
+short_description: Create, update, and delete an Elastic Beanstalk application
+
+
+description:
+  - Creates, updates, or deletes Elastic Beanstalk applications if I(app_name) is provided.
+
+options:
+ app_name:
+ description:
+ - Name of the beanstalk application you wish to manage.
+ aliases: [ 'name' ]
+ type: str
+ description:
+ description:
+ - The description of the application.
+ type: str
+ state:
+ description:
+ - Whether to ensure the application is present or absent.
+ default: present
+ choices: ['absent','present']
+ type: str
+ terminate_by_force:
+ description:
+ - When I(terminate_by_force=true), running environments will be terminated before deleting the application.
+ default: false
+ type: bool
+author:
+ - Harpreet Singh (@hsingh)
+ - Stephen Granger (@viper233)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Create or update an application
+- community.aws.aws_elasticbeanstalk_app:
+ app_name: Sample_App
+ description: "Hello World App"
+ state: present
+
+# Delete application
+- community.aws.aws_elasticbeanstalk_app:
+ app_name: Sample_App
+ state: absent
+
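+# Delete application, terminating any running environments first
+# (an illustrative sketch combining the documented terminate_by_force option with state=absent)
+- community.aws.aws_elasticbeanstalk_app:
+    app_name: Sample_App
+    terminate_by_force: true
+    state: absent
+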
+'''
+
+RETURN = '''
+app:
+ description: Beanstalk application.
+ returned: always
+ type: dict
+ sample: {
+ "ApplicationName": "app-name",
+ "ConfigurationTemplates": [],
+ "DateCreated": "2016-12-28T14:50:03.185000+00:00",
+ "DateUpdated": "2016-12-28T14:50:03.185000+00:00",
+ "Description": "description",
+ "Versions": [
+ "1.0.0",
+ "1.0.1"
+ ]
+ }
+output:
+ description: Message indicating what change will occur.
+ returned: in check mode
+ type: str
+ sample: App is up-to-date
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def describe_app(ebs, app_name, module):
+ apps = list_apps(ebs, app_name, module)
+
+ return None if len(apps) != 1 else apps[0]
+
+
+def list_apps(ebs, app_name, module):
+ try:
+ if app_name is not None:
+ apps = ebs.describe_applications(ApplicationNames=[app_name])
+ else:
+ apps = ebs.describe_applications()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe application")
+
+ return apps.get("Applications", [])
+
+
+def check_app(ebs, app, module):
+ app_name = module.params['app_name']
+ description = module.params['description']
+ state = module.params['state']
+ terminate_by_force = module.params['terminate_by_force']
+
+ result = {}
+
+ if state == 'present' and app is None:
+ result = dict(changed=True, output="App would be created")
+ elif state == 'present' and app.get("Description", None) != description:
+ result = dict(changed=True, output="App would be updated", app=app)
+ elif state == 'present' and app.get("Description", None) == description:
+ result = dict(changed=False, output="App is up-to-date", app=app)
+ elif state == 'absent' and app is None:
+ result = dict(changed=False, output="App does not exist", app={})
+    elif state == 'absent' and app is not None and terminate_by_force is True:
+        # The terminate_by_force case must be checked before the generic delete branch, or it can never match
+        result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app)
+    elif state == 'absent' and app is not None:
+        result = dict(changed=True, output="App will be deleted", app=app)
+
+ module.exit_json(**result)
+
+
+def filter_empty(**kwargs):
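+    # Keep only the keyword arguments with truthy values so optional API parameters are omitted entirely.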
+ retval = {}
+ for k, v in kwargs.items():
+ if v:
+ retval[k] = v
+ return retval
+
+
+def main():
+ argument_spec = dict(
+ app_name=dict(aliases=['name'], type='str', required=False),
+ description=dict(),
+ state=dict(choices=['present', 'absent'], default='present'),
+ terminate_by_force=dict(type='bool', default=False, required=False)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ app_name = module.params['app_name']
+ description = module.params['description']
+ state = module.params['state']
+ terminate_by_force = module.params['terminate_by_force']
+
+ if app_name is None:
+ module.fail_json(msg='Module parameter "app_name" is required')
+
+ result = {}
+
+ ebs = module.client('elasticbeanstalk')
+
+ app = describe_app(ebs, app_name, module)
+
+ if module.check_mode:
+ check_app(ebs, app, module)
+ module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
+
+ if state == 'present':
+ if app is None:
+ try:
+                ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not create application")
+
+ app = describe_app(ebs, app_name, module)
+
+ result = dict(changed=True, app=app)
+ else:
+ if app.get("Description", None) != description:
+ try:
+ if not description:
+ ebs.update_application(ApplicationName=app_name)
+ else:
+ ebs.update_application(ApplicationName=app_name, Description=description)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not update application")
+
+ app = describe_app(ebs, app_name, module)
+
+ result = dict(changed=True, app=app)
+ else:
+ result = dict(changed=False, app=app)
+
+ else:
+ if app is None:
+ result = dict(changed=False, output='Application not found', app={})
+ else:
+ try:
+ if terminate_by_force:
+ # Running environments will be terminated before deleting the application
+ ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force)
+ else:
+ ebs.delete_application(ApplicationName=app_name)
+ changed = True
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Cannot terminate app")
+ except ClientError as e:
+ if 'It is currently pending deletion.' not in e.response['Error']['Message']:
+ module.fail_json_aws(e, msg="Cannot terminate app")
+ else:
+ changed = False
+
+ result = dict(changed=changed, app=app)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_connection.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_connection.py
new file mode 100644
index 00000000..0df4ab91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_connection.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_glue_connection
+version_added: 1.0.0
+short_description: Manage an AWS Glue connection
+description:
+ - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ catalog_id:
+ description:
+ - The ID of the Data Catalog in which to create the connection. If none is supplied,
+ the AWS account ID is used by default.
+ type: str
+ connection_properties:
+ description:
+ - A dict of key-value pairs used as parameters for this connection.
+ - Required when I(state=present).
+ type: dict
+ connection_type:
+ description:
+ - The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
+ default: JDBC
+ choices: [ 'JDBC', 'SFTP' ]
+ type: str
+ description:
+ description:
+ - The description of the connection.
+ type: str
+ match_criteria:
+ description:
+ - A list of UTF-8 strings that specify the criteria that you can use in selecting this connection.
+ type: list
+ elements: str
+ name:
+ description:
+ - The name of the connection.
+ required: true
+ type: str
+ security_groups:
+ description:
+ - A list of security groups to be used by the connection. Use either security group name or ID.
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete the AWS Glue connection.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnet_id:
+ description:
+ - The subnet ID used by the connection.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue connection
+- community.aws.aws_glue_connection:
+ name: my-glue-connection
+ connection_properties:
+ JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename
+ USERNAME: my-username
+ PASSWORD: my-password
+ state: present
+
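+# Create an AWS Glue connection inside a VPC (an illustrative sketch; the
+# security group name and subnet ID below are placeholder values)
+- community.aws.aws_glue_connection:
+    name: my-glue-vpc-connection
+    connection_properties:
+      JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename
+      USERNAME: my-username
+      PASSWORD: my-password
+    security_groups:
+      - my-security-group
+    subnet_id: subnet-123abc
+    state: present
+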
+# Delete an AWS Glue connection
+- community.aws.aws_glue_connection:
+ name: my-glue-connection
+ state: absent
+
+'''
+
+RETURN = r'''
+connection_properties:
+ description: A dict of key-value pairs used as parameters for this connection.
+ returned: when state is present
+ type: dict
+ sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
+connection_type:
+ description: The type of the connection.
+ returned: when state is present
+ type: str
+ sample: JDBC
+creation_time:
+ description: The time this connection definition was created.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+description:
+    description: The description of the connection.
+    returned: when state is present
+    type: str
+    sample: My first Glue connection
+last_updated_time:
+ description: The last time this connection definition was updated.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+match_criteria:
+ description: A list of criteria that can be used in selecting this connection.
+ returned: when state is present
+ type: list
+ sample: []
+name:
+ description: The name of the connection definition.
+ returned: when state is present
+ type: str
+ sample: my-glue-connection
+physical_connection_requirements:
+ description: A dict of physical connection requirements, such as VPC and SecurityGroup,
+ needed for making this connection successfully.
+ returned: when state is present
+ type: dict
+    sample: {'subnet_id': 'subnet-aabbccddee'}
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
+
+# Non-ansible imports
+import copy
+import time
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+
+def _get_glue_connection(connection, module):
+ """
+ Get an AWS Glue connection based on name. If not found, return None.
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :return: boto3 Glue connection dict or None if not found
+ """
+
+ connection_name = module.params.get("name")
+ connection_catalog_id = module.params.get("catalog_id")
+
+ params = {'Name': connection_name}
+ if connection_catalog_id is not None:
+ params['CatalogId'] = connection_catalog_id
+
+ try:
+ return connection.get_connection(**params)['Connection']
+    except ClientError as e:
+        # Only ClientError carries an error response to inspect; a missing connection is reported as EntityNotFoundException
+        if e.response['Error']['Code'] == 'EntityNotFoundException':
+            return None
+        raise e
+
+
+def _compare_glue_connection_params(user_params, current_params):
+ """
+ Compare Glue connection params. If there is a difference, return True immediately else return False
+
+ :param user_params: the Glue connection parameters passed by the user
+ :param current_params: the Glue connection parameters currently configured
+ :return: True if any parameter is mismatched else False
+ """
+
+ # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
+ # To counter this, add the key if it's missing with a blank value
+
+ if 'Description' not in current_params:
+ current_params['Description'] = ""
+ if 'MatchCriteria' not in current_params:
+ current_params['MatchCriteria'] = list()
+ if 'PhysicalConnectionRequirements' not in current_params:
+ current_params['PhysicalConnectionRequirements'] = dict()
+ current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = []
+ current_params['PhysicalConnectionRequirements']['SubnetId'] = ""
+
+ if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \
+ != current_params['ConnectionProperties']:
+ return True
+ if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \
+ != current_params['ConnectionType']:
+ return True
+ if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']:
+ return True
+ if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']):
+ return True
+ if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']:
+ if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
+ set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \
+ != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']):
+ return True
+ if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
+ user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \
+ != current_params['PhysicalConnectionRequirements']['SubnetId']:
+ return True
+
+ return False
+
+
+def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection):
+ """
+ Create or update an AWS Glue connection
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_connection: a dict of AWS Glue connection parameters or None
+ :return:
+ """
+
+ changed = False
+ params = dict()
+ params['ConnectionInput'] = dict()
+ params['ConnectionInput']['Name'] = module.params.get("name")
+ params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type")
+ params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties")
+ if module.params.get("catalog_id") is not None:
+ params['CatalogId'] = module.params.get("catalog_id")
+ if module.params.get("description") is not None:
+ params['ConnectionInput']['Description'] = module.params.get("description")
+ if module.params.get("match_criteria") is not None:
+ params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria")
+ if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None:
+ params['ConnectionInput']['PhysicalConnectionRequirements'] = dict()
+ if module.params.get("security_groups") is not None:
+ # Get security group IDs from names
+ security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True)
+ params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids
+ if module.params.get("subnet_id") is not None:
+ params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id")
+
+ # If glue_connection is not None then check if it needs to be modified, else create it
+ if glue_connection:
+ if _compare_glue_connection_params(params, glue_connection):
+ try:
+ # We need to slightly modify the params for an update
+ update_params = copy.deepcopy(params)
+ update_params['Name'] = update_params['ConnectionInput']['Name']
+ connection.update_connection(**update_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+ else:
+ try:
+ connection.create_connection(**params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ # If changed, get the Glue connection again
+ if changed:
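+        # A newly created or updated connection may not be visible immediately, so poll briefly before returning it.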
+ glue_connection = None
+ for i in range(10):
+ glue_connection = _get_glue_connection(connection, module)
+ if glue_connection is not None:
+ break
+ time.sleep(10)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection))
+
+
+def delete_glue_connection(connection, module, glue_connection):
+ """
+ Delete an AWS Glue connection
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_connection: a dict of AWS Glue connection parameters or None
+ :return:
+ """
+
+ changed = False
+
+ params = {'ConnectionName': module.params.get("name")}
+ if module.params.get("catalog_id") is not None:
+ params['CatalogId'] = module.params.get("catalog_id")
+
+ if glue_connection:
+ try:
+ connection.delete_connection(**params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ catalog_id=dict(type='str'),
+ connection_properties=dict(type='dict'),
+ connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']),
+ description=dict(type='str'),
+ match_criteria=dict(type='list', elements='str'),
+ name=dict(required=True, type='str'),
+ security_groups=dict(type='list', elements='str'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ subnet_id=dict(type='str')
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['connection_properties'])
+ ]
+ )
+
+ connection_glue = module.client('glue')
+ connection_ec2 = module.client('ec2')
+
+ glue_connection = _get_glue_connection(connection_glue, module)
+
+ if module.params.get("state") == 'present':
+ create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
+ else:
+ delete_glue_connection(connection_glue, module, glue_connection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_job.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_job.py
new file mode 100644
index 00000000..1d991f52
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_glue_job.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_glue_job
+version_added: 1.0.0
+short_description: Manage an AWS Glue job
+description:
+ - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ allocated_capacity:
+ description:
+ - The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs
+ can be allocated; the default is 10. A DPU is a relative measure of processing power that consists
+ of 4 vCPUs of compute capacity and 16 GB of memory.
+ type: int
+ command_name:
+ description:
+ - The name of the job command. This must be 'glueetl'.
+ default: glueetl
+ type: str
+ command_script_location:
+ description:
+ - The S3 path to a script that executes a job.
+ - Required when I(state=present).
+ type: str
+ connections:
+ description:
+ - A list of Glue connections used for this job.
+ type: list
+ elements: str
+ default_arguments:
+ description:
+ - A dict of default arguments for this job. You can specify arguments here that your own job-execution
+ script consumes, as well as arguments that AWS Glue itself consumes.
+ type: dict
+ description:
+ description:
+ - Description of the job being defined.
+ type: str
+ max_concurrent_runs:
+ description:
+ - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when
+ this threshold is reached. The maximum value you can specify is controlled by a service limit.
+ type: int
+ max_retries:
+ description:
+ - The maximum number of times to retry this job if it fails.
+ type: int
+ name:
+ description:
+ - The name you assign to this job definition. It must be unique in your account.
+ required: true
+ type: str
+ role:
+ description:
+ - The name or ARN of the IAM role associated with this job.
+ - Required when I(state=present).
+ type: str
+ state:
+ description:
+ - Create or delete the AWS Glue job.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ timeout:
+ description:
+ - The job timeout in minutes.
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue job
+- community.aws.aws_glue_job:
+ command_script_location: s3bucket/script.py
+ name: my-glue-job
+ role: my-iam-role
+ state: present
+
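+# Create an AWS Glue job with tuning options (an illustrative sketch; the
+# values below are placeholders for the documented optional parameters)
+- community.aws.aws_glue_job:
+    command_script_location: s3bucket/script.py
+    name: my-tuned-glue-job
+    role: my-iam-role
+    allocated_capacity: 10
+    max_concurrent_runs: 2
+    max_retries: 1
+    timeout: 120
+    default_arguments:
+      mykey1: myvalue1
+    state: present
+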
+# Delete an AWS Glue job
+- community.aws.aws_glue_job:
+ name: my-glue-job
+ state: absent
+
+'''
+
+RETURN = r'''
+allocated_capacity:
+ description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
+ 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
+ that consists of 4 vCPUs of compute capacity and 16 GB of memory.
+ returned: when state is present
+ type: int
+ sample: 10
+command:
+ description: The JobCommand that executes this job.
+ returned: when state is present
+ type: complex
+ contains:
+ name:
+ description: The name of the job command.
+ returned: when state is present
+ type: str
+ sample: glueetl
+ script_location:
+ description: Specifies the S3 path to a script that executes a job.
+ returned: when state is present
+ type: str
+ sample: mybucket/myscript.py
+connections:
+ description: The connections used for this job.
+ returned: when state is present
+ type: dict
+ sample: "{ Connections: [ 'list', 'of', 'connections' ] }"
+created_on:
+ description: The time and date that this job definition was created.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+default_arguments:
+ description: The default arguments for this job, specified as name-value pairs.
+ returned: when state is present
+ type: dict
+ sample: "{ 'mykey1': 'myvalue1' }"
+description:
+ description: Description of the job being defined.
+ returned: when state is present
+ type: str
+ sample: My first Glue job
+job_name:
+ description: The name of the AWS Glue job.
+ returned: always
+ type: str
+ sample: my-glue-job
+execution_property:
+ description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
+ returned: always
+ type: complex
+ contains:
+ max_concurrent_runs:
+ description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
+ returned when this threshold is reached. The maximum value you can specify is controlled by
+ a service limit.
+ returned: when state is present
+ type: int
+ sample: 1
+last_modified_on:
+ description: The last point in time when this job definition was modified.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+max_retries:
+ description: The maximum number of times to retry this job after a JobRun fails.
+ returned: when state is present
+ type: int
+ sample: 5
+name:
+ description: The name assigned to this job definition.
+ returned: when state is present
+ type: str
+ sample: my-glue-job
+role:
+ description: The name or ARN of the IAM role associated with this job.
+ returned: when state is present
+ type: str
+ sample: my-iam-role
+timeout:
+ description: The job timeout in minutes.
+ returned: when state is present
+ type: int
+ sample: 300
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+# Non-ansible imports
+import copy
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+
+def _get_glue_job(connection, module, glue_job_name):
+ """
+ Get an AWS Glue job based on name. If not found, return None.
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job_name: Name of Glue job to get
+ :return: boto3 Glue job dict or None if not found
+ """
+
+ try:
+ return connection.get_job(JobName=glue_job_name)['Job']
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'EntityNotFoundException':
+            return None
+        module.fail_json_aws(e)
+    except BotoCoreError as e:
+        # BotoCoreError carries no error response to inspect, so fail straight away
+        module.fail_json_aws(e)
+
+
+def _compare_glue_job_params(user_params, current_params):
+ """
+ Compare Glue job params. If there is a difference, return True immediately else return False
+
+ :param user_params: the Glue job parameters passed by the user
+ :param current_params: the Glue job parameters currently configured
+ :return: True if any parameter is mismatched else False
+ """
+
+ # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
+ # To counter this, add the key if it's missing with a blank value
+
+ if 'Description' not in current_params:
+ current_params['Description'] = ""
+ if 'DefaultArguments' not in current_params:
+ current_params['DefaultArguments'] = dict()
+
+ if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
+ return True
+ if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
+ return True
+    if 'Connections' in user_params and set(user_params['Connections']['Connections']) \
+            != set(current_params.get('Connections', {}).get('Connections', [])):
+        return True
+    if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']:
+        return True
+ if 'Description' in user_params and user_params['Description'] != current_params['Description']:
+ return True
+ if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
+ return True
+ if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
+ return True
+ if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
+ return True
+
+ return False
+
+
+def create_or_update_glue_job(connection, module, glue_job):
+ """
+ Create or update an AWS Glue job
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job: a dict of AWS Glue job parameters or None
+ :return:
+ """
+
+ changed = False
+ params = dict()
+ params['Name'] = module.params.get("name")
+ params['Role'] = module.params.get("role")
+ if module.params.get("allocated_capacity") is not None:
+ params['AllocatedCapacity'] = module.params.get("allocated_capacity")
+ if module.params.get("command_script_location") is not None:
+ params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
+ if module.params.get("connections") is not None:
+ params['Connections'] = {'Connections': module.params.get("connections")}
+ if module.params.get("default_arguments") is not None:
+ params['DefaultArguments'] = module.params.get("default_arguments")
+ if module.params.get("description") is not None:
+ params['Description'] = module.params.get("description")
+ if module.params.get("max_concurrent_runs") is not None:
+ params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
+ if module.params.get("max_retries") is not None:
+ params['MaxRetries'] = module.params.get("max_retries")
+ if module.params.get("timeout") is not None:
+ params['Timeout'] = module.params.get("timeout")
+
+ # If glue_job is not None then check if it needs to be modified, else create it
+ if glue_job:
+ if _compare_glue_job_params(params, glue_job):
+ try:
+ # Update job needs slightly modified params
+ update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
+ del update_params['JobUpdate']['Name']
+ connection.update_job(**update_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+ else:
+ try:
+ connection.create_job(**params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ # If changed, get the Glue job again
+ if changed:
+ glue_job = _get_glue_job(connection, module, params['Name'])
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job))
+
+
+def delete_glue_job(connection, module, glue_job):
+ """
+ Delete an AWS Glue job
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job: a dict of AWS Glue job parameters or None
+ :return:
+ """
+
+ changed = False
+
+ if glue_job:
+ try:
+ connection.delete_job(JobName=glue_job['Name'])
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ allocated_capacity=dict(type='int'),
+ command_name=dict(type='str', default='glueetl'),
+ command_script_location=dict(type='str'),
+ connections=dict(type='list', elements='str'),
+ default_arguments=dict(type='dict'),
+ description=dict(type='str'),
+ max_concurrent_runs=dict(type='int'),
+ max_retries=dict(type='int'),
+ name=dict(required=True, type='str'),
+ role=dict(type='str'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ timeout=dict(type='int')
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['role', 'command_script_location'])
+ ]
+ )
+
+ connection = module.client('glue')
+
+ state = module.params.get("state")
+
+ glue_job = _get_glue_job(connection, module, module.params.get("name"))
+
+ if state == 'present':
+ create_or_update_glue_job(connection, module, glue_job)
+ else:
+ delete_glue_job(connection, module, glue_job)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_inspector_target.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_inspector_target.py
new file mode 100644
index 00000000..b71fbf61
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_inspector_target.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Dennis Conrad for Sainsbury's
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_inspector_target
+version_added: 1.0.0
+short_description: Create, update and delete Amazon Inspector assessment targets
+description: Creates, updates, or deletes Amazon Inspector Assessment Targets
+ and manages the required Resource Groups.
+author: "Dennis Conrad (@dennisconrad)"
+options:
+ name:
+ description:
+ - The user-defined name that identifies the assessment target. The name
+ must be unique within the AWS account.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of the assessment target.
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ tags:
+ description:
+ - Tags of the EC2 instances to be added to the assessment target.
+ - Required if C(state=present).
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+'''
+
+EXAMPLES = '''
+- name: Create my_target Assessment Target
+ community.aws.aws_inspector_target:
+ name: my_target
+ tags:
+ role: scan_target
+
+- name: Update Existing my_target Assessment Target with Additional Tags
+ community.aws.aws_inspector_target:
+ name: my_target
+ tags:
+ env: dev
+ role: scan_target
+
+- name: Delete my_target Assessment Target
+ community.aws.aws_inspector_target:
+ name: my_target
+ state: absent
+'''
+
+RETURN = '''
+arn:
+ description: The ARN that specifies the Amazon Inspector assessment target.
+ returned: success
+ type: str
+ sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1"
+created_at:
+ description: The time at which the assessment target was created.
+ returned: success
+ type: str
+ sample: "2018-01-29T13:48:51.958000+00:00"
+name:
+ description: The name of the Amazon Inspector assessment target.
+ returned: success
+ type: str
+ sample: "my_target"
+resource_group_arn:
+ description: The ARN that specifies the resource group that is associated
+ with the assessment target.
+ returned: success
+ type: str
+ sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8"
+tags:
+ description: The tags of the resource group that is associated with the
+ assessment target.
+ returned: success
+  type: dict
+ sample: {"role": "scan_target", "env": "dev"}
+updated_at:
+ description: The time at which the assessment target was last updated.
+ returned: success
+ type: str
+ sample: "2018-01-29T13:48:51.958000+00:00"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ ansible_dict_to_boto3_tag_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ compare_aws_tags,
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ tags=dict(type='dict'),
+ )
+
+ required_if = [['state', 'present', ['tags']]]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ required_if=required_if,
+ )
+
+ name = module.params.get('name')
+ state = module.params.get('state').lower()
+ tags = module.params.get('tags')
+ if tags:
+ tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
+ client = module.client('inspector')
+
+ try:
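+        # The indexing below raises IndexError when no matching target exists; that case is handled after the AWS error handler.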
+ existing_target_arn = client.list_assessment_targets(
+ filter={'assessmentTargetNamePattern': name},
+ ).get('assessmentTargetArns')[0]
+
+ existing_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[existing_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ existing_resource_group_arn = existing_target.get('resource_group_arn')
+ existing_resource_group_tags = client.describe_resource_groups(
+ resourceGroupArns=[existing_resource_group_arn],
+ ).get('resourceGroups')[0].get('tags')
+
+ target_exists = True
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to retrieve targets")
+ except IndexError:
+ target_exists = False
+
+ if state == 'present' and target_exists:
+ ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags)
+ ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(
+ existing_resource_group_tags
+ )
+ tags_to_add, tags_to_remove = compare_aws_tags(
+ ansible_dict_tags,
+ ansible_dict_existing_tags
+ )
+ if not (tags_to_add or tags_to_remove):
+ existing_target.update({'tags': ansible_dict_existing_tags})
+ module.exit_json(changed=False, **existing_target)
+ else:
+ try:
+ updated_resource_group_arn = client.create_resource_group(
+ resourceGroupTags=tags,
+ ).get('resourceGroupArn')
+
+ client.update_assessment_target(
+ assessmentTargetArn=existing_target_arn,
+ assessmentTargetName=name,
+ resourceGroupArn=updated_resource_group_arn,
+ )
+
+ updated_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[existing_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ updated_target.update({'tags': ansible_dict_tags})
+            module.exit_json(changed=True, **updated_target)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to update target")
+
+ elif state == 'present' and not target_exists:
+ try:
+ new_resource_group_arn = client.create_resource_group(
+ resourceGroupTags=tags,
+ ).get('resourceGroupArn')
+
+ new_target_arn = client.create_assessment_target(
+ assessmentTargetName=name,
+ resourceGroupArn=new_resource_group_arn,
+ ).get('assessmentTargetArn')
+
+ new_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[new_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)})
+ module.exit_json(changed=True, **new_target)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to create target")
+
+ elif state == 'absent' and target_exists:
+ try:
+ client.delete_assessment_target(
+ assessmentTargetArn=existing_target_arn,
+ )
+ module.exit_json(changed=True)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to delete target")
+
+ elif state == 'absent' and not target_exists:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms.py
new file mode 100644
index 00000000..b86686cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms.py
@@ -0,0 +1,1076 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_kms
+version_added: 1.0.0
+short_description: Perform various KMS management tasks.
+description:
+ - Manage role/user access to a KMS key. Not designed for encrypting/decrypting.
+options:
+ alias:
+ description: An alias for a key. For safety, even though KMS does not require keys
+ to have an alias, this module expects all new keys to be given an alias
+ to make them easier to manage. Existing keys without an alias may be
+ referred to by I(key_id). Use M(community.aws.aws_kms_info) to find key ids. Required
+ if I(key_id) is not given. Note that passing a I(key_id) and I(alias)
+ will only cause a new alias to be added, an alias will never be renamed.
+ The 'alias/' prefix is optional.
+ required: false
+ aliases:
+ - key_alias
+ type: str
+ key_id:
+ description:
+ - Key ID or ARN of the key.
+ - One of I(alias) or I(key_id) are required.
+ required: false
+ aliases:
+ - key_arn
+ type: str
+ enable_key_rotation:
+ description:
+ - Whether the key should be automatically rotated every year.
+ required: false
+ type: bool
+ policy_mode:
+ description:
+ - (deprecated) Grant or deny access.
+ - Used for modifying the Key Policy rather than modifying a grant and only
+ works on the default policy created through the AWS Console.
+ - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
+ default: grant
+ choices: [ grant, deny ]
+ aliases:
+ - mode
+ type: str
+ policy_role_name:
+ description:
+ - (deprecated) Role to allow/deny access.
+ - One of I(policy_role_name) or I(policy_role_arn) are required.
+ - Used for modifying the Key Policy rather than modifying a grant and only
+ works on the default policy created through the AWS Console.
+ - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
+ required: false
+ aliases:
+ - role_name
+ type: str
+ policy_role_arn:
+ description:
+ - (deprecated) ARN of role to allow/deny access.
+ - One of I(policy_role_name) or I(policy_role_arn) are required.
+ - Used for modifying the Key Policy rather than modifying a grant and only
+ works on the default policy created through the AWS Console.
+ - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
+ type: str
+ required: false
+ aliases:
+ - role_arn
+ policy_grant_types:
+ description:
+ - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin".
+ - Required when I(policy_mode=grant).
+ - Used for modifying the Key Policy rather than modifying a grant and only
+ works on the default policy created through the AWS Console.
+ - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
+ required: false
+ aliases:
+ - grant_types
+ type: list
+ elements: str
+ policy_clean_invalid_entries:
+ description:
+ - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases.
+ - Only cleans if changes are being made.
+ - Used for modifying the Key Policy rather than modifying a grant and only
+ works on the default policy created through the AWS Console.
+ - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
+ type: bool
+ default: true
+ aliases:
+ - clean_invalid_entries
+ state:
+ description: Whether a key should be present or absent. Note that making an
+ existing key absent only schedules a key for deletion. Passing a key that
+ is scheduled for deletion with state present will cancel key deletion.
+ required: False
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ enabled:
+ description: Whether or not a key is enabled
+ default: True
+ type: bool
+ description:
+ description:
+ A description of the CMK. Use a description that helps you decide
+ whether the CMK is appropriate for a task.
+ type: str
+ tags:
+ description: A dictionary of tags to apply to a key.
+ type: dict
+ purge_tags:
+ description: Whether the I(tags) argument should cause tags not in the list to
+ be removed
+ default: False
+ type: bool
+ purge_grants:
+ description: Whether the I(grants) argument should cause grants not in the list to
+ be removed
+ default: False
+ type: bool
+ grants:
+ description:
+ - A list of grants to apply to the key. Each item must contain I(grantee_principal).
+ Each item can optionally contain I(retiring_principal), I(operations), I(constraints),
+ I(name).
+ - I(grantee_principal) and I(retiring_principal) must be ARNs
+ - 'For full documentation of suboptions see the boto3 documentation:'
+ - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)'
+ type: list
+ elements: dict
+ suboptions:
+ grantee_principal:
+ description: The full ARN of the principal being granted permissions.
+ required: true
+ type: str
+ retiring_principal:
+ description: The full ARN of the principal permitted to revoke/retire the grant.
+ type: str
+ operations:
+ type: list
+ elements: str
+ description:
+ - A list of operations that the grantee may perform using the CMK.
+ choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo',
+ 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign']
+ constraints:
+ description:
+ - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals),
+ either or both being a dict specifying an encryption context match.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or
+ U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)
+ type: dict
+ policy:
+ description:
+ - policy to apply to the KMS key.
+ - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
+ type: json
+author:
+ - Ted Timmons (@tedder)
+ - Will Thames (@willthames)
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile
+# and has been deprecated in favour of the policy option.
+- name: grant user-style access to production secrets
+ community.aws.aws_kms:
+ args:
+ alias: "alias/my_production_secrets"
+ policy_mode: grant
+ policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
+ policy_grant_types: "role,role grant"
+- name: remove access to production secrets from role
+ community.aws.aws_kms:
+ args:
+ alias: "alias/my_production_secrets"
+ policy_mode: deny
+ policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
+
+# Create a new KMS key
+- community.aws.aws_kms:
+ alias: mykey
+ tags:
+ Name: myKey
+ Purpose: protect_stuff
+
+# Update previous key with more tags
+- community.aws.aws_kms:
+ alias: mykey
+ tags:
+ Name: myKey
+ Purpose: protect_stuff
+ Owner: security_team
+
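+# Enable automatic yearly rotation on the same key
+# (an illustrative sketch using the enable_key_rotation option)
+- community.aws.aws_kms:
+    alias: mykey
+    enable_key_rotation: yes
+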
+# Update a known key with grants allowing an instance with the billing-prod IAM profile
+# to decrypt data encrypted with the environment: production, application: billing
+# encryption context
+- community.aws.aws_kms:
+ key_id: abcd1234-abcd-1234-5678-ef1234567890
+ grants:
+ - name: billing_prod
+ grantee_principal: arn:aws:iam::1234567890123:role/billing_prod
+ constraints:
+ encryption_context_equals:
+ environment: production
+ application: billing
+ operations:
+ - Decrypt
+ - RetireGrant
+
+- name: Update IAM policy on an existing KMS key
+ community.aws.aws_kms:
+ alias: my-kms-key
+ policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { <SOME STATEMENT> } ]}'
+ state: present
+
+- name: Example using lookup for policy json
+ community.aws.aws_kms:
+ alias: my-kms-key
+ policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}"
+ state: present
+'''
+
+RETURN = r'''
+key_id:
+ description: ID of key
+ type: str
+ returned: always
+ sample: abcd1234-abcd-1234-5678-ef1234567890
+key_arn:
+ description: ARN of key
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+key_state:
+ description: The state of the key
+ type: str
+ returned: always
+ sample: PendingDeletion
+key_usage:
+ description: The cryptographic operations for which you can use the key.
+ type: str
+ returned: always
+ sample: ENCRYPT_DECRYPT
+origin:
+ description: The source of the key's key material. When this value is C(AWS_KMS),
+ AWS KMS created the key material. When this value is C(EXTERNAL), the
+ key material was imported or the CMK lacks key material.
+ type: str
+ returned: always
+ sample: AWS_KMS
+aws_account_id:
+ description: The AWS Account ID that the key belongs to
+ type: str
+ returned: always
+ sample: 1234567890123
+creation_date:
+ description: Date of creation of the key
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08.551000+10:00"
+description:
+ description: Description of the key
+ type: str
+ returned: always
+ sample: "My Key for Protecting important stuff"
+enabled:
+  description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
+  type: bool
+  returned: always
+  sample: false
+aliases:
+ description: list of aliases associated with the key
+ type: list
+ returned: always
+ sample:
+ - aws/acm
+ - aws/ebs
+policies:
+  description: list of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "111111111111"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::111111111111:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+tags:
+ description: dictionary of tags applied to the key
+ type: dict
+ returned: always
+ sample:
+ Name: myKey
+ Purpose: protecting_stuff
+grants:
+ description: list of grants associated with a key
+ type: complex
+ returned: always
+ contains:
+ constraints:
+ description: Constraints on the encryption context that the grant allows.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
+ type: dict
+ returned: always
+ sample:
+ encryption_context_equals:
+ "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
+ creation_date:
+ description: Date of creation of the grant
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08+10:00"
+ grant_id:
+ description: The unique ID for the grant
+ type: str
+ returned: always
+ sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
+ grantee_principal:
+ description: The principal that receives the grant's permissions
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+ issuing_account:
+ description: The AWS account under which the grant was issued
+ type: str
+ returned: always
+ sample: arn:aws:iam::01234567890:root
+ key_id:
+ description: The key ARN to which the grant applies.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ name:
+ description: The friendly name that identifies the grant
+ type: str
+ returned: always
+ sample: xyz
+ operations:
+ description: The list of operations permitted by the grant
+ type: list
+ returned: always
+ sample:
+ - Decrypt
+ - RetireGrant
+ retiring_principal:
+ description: The principal that can retire the grant
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+changes_needed:
+ description: grant types that would be changed/were changed.
+ type: dict
+ returned: always
+ sample: { "role": "add", "role grant": "add" }
+had_invalid_entries:
+  description: there are invalid (non-ARN) entries in the KMS key policy. These don't count as a change, but will be removed if any changes are being made.
+ type: bool
+ returned: always
+'''
+
+import json
+
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, compare_policies
+
+# these mappings are used to go from simple labels to the actual 'Sid' values returned
+# by get_policy. They seem to be magic values.
+statement_label = {
+    'role': 'Allow use of the key',
+    'role grant': 'Allow attachment of persistent resources',
+    'admin': 'Allow access for Key Administrators'
+}
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_iam_roles_with_backoff(connection):
+ paginator = connection.get_paginator('list_roles')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_keys_with_backoff(connection):
+ paginator = connection.get_paginator('list_keys')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_aliases_with_backoff(connection):
+ paginator = connection.get_paginator('list_aliases')
+ return paginator.paginate().build_full_result()
+
+
+def get_kms_aliases_lookup(connection):
+ _aliases = dict()
+ for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
+ # Not all aliases are actually associated with a key
+ if 'TargetKeyId' in alias:
+ # strip off leading 'alias/' and add it to key's aliases
+ if alias['TargetKeyId'] in _aliases:
+ _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
+ else:
+ _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
+ return _aliases
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_tags_with_backoff(connection, key_id, **kwargs):
+ return connection.list_resource_tags(KeyId=key_id, **kwargs)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_grants_with_backoff(connection, key_id):
+ params = dict(KeyId=key_id)
+ paginator = connection.get_paginator('list_grants')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_metadata_with_backoff(connection, key_id):
+ return connection.describe_key(KeyId=key_id)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_key_policies_with_backoff(connection, key_id):
+ paginator = connection.get_paginator('list_key_policies')
+ return paginator.paginate(KeyId=key_id).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_key_policy_with_backoff(connection, key_id, policy_name):
+ return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
+
+
+def get_kms_tags(connection, module, key_id):
+ # Handle pagination here as list_resource_tags does not have
+ # a paginator
+ kwargs = {}
+ tags = []
+ more = True
+ while more:
+ try:
+ tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
+ tags.extend(tag_response['Tags'])
+ except is_boto3_error_code('AccessDeniedException'):
+ tag_response = {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key tags")
+ if tag_response.get('NextMarker'):
+ kwargs['Marker'] = tag_response['NextMarker']
+ else:
+ more = False
+ return tags
+
+
+def get_kms_policies(connection, module, key_id):
+ try:
+ policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
+ return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
+ policy in policies]
+ except is_boto3_error_code('AccessDeniedException'):
+ return []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key policies")
+
+
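+# Illustrative example (values made up): a grant such as
+#   {'GrantId': 'abcd1234', 'Constraints': {'EncryptionContextEquals': {'AppName': 'xyz'}}}
+# becomes
+#   {'grant_id': 'abcd1234', 'constraints': {'encryption_context_equals': {'AppName': 'xyz'}}}
+# with the user-supplied encryption context keys preserved verbatim.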
+def camel_to_snake_grant(grant):
+ ''' camel_to_snake_grant snakifies everything except the encryption context '''
+ constraints = grant.get('Constraints', {})
+ result = camel_dict_to_snake_dict(grant)
+ if 'EncryptionContextEquals' in constraints:
+ result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals']
+ if 'EncryptionContextSubset' in constraints:
+ result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset']
+ return result
+
+
+def get_key_details(connection, module, key_id):
+ try:
+ result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain key metadata")
+ result['KeyArn'] = result.pop('Arn')
+
+ try:
+ aliases = get_kms_aliases_lookup(connection)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain aliases")
+
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+ result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled')
+ result['aliases'] = aliases.get(result['KeyId'], [])
+
+ result = camel_dict_to_snake_dict(result)
+
+ # grants and tags get snakified differently
+ try:
+ result['grants'] = [camel_to_snake_grant(grant) for grant in
+ get_kms_grants_with_backoff(connection, key_id)['Grants']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain key grants")
+ tags = get_kms_tags(connection, module, key_id)
+ result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
+ result['policies'] = get_kms_policies(connection, module, key_id)
+ return result
+
+
+def get_kms_facts(connection, module):
+ try:
+ keys = get_kms_keys_with_backoff(connection)['Keys']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain keys")
+
+ return [get_key_details(connection, module, key['KeyId']) for key in keys]
+
+
+def convert_grant_params(grant, key):
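+    '''Build the CamelCase parameters for KMS create_grant from a grant dict
+    in the snake_case form accepted by the module's grants option.'''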
+ grant_params = dict(KeyId=key['key_arn'],
+ GranteePrincipal=grant['grantee_principal'])
+ if grant.get('operations'):
+ grant_params['Operations'] = grant['operations']
+ if grant.get('retiring_principal'):
+ grant_params['RetiringPrincipal'] = grant['retiring_principal']
+ if grant.get('name'):
+ grant_params['Name'] = grant['name']
+ if grant.get('constraints'):
+ grant_params['Constraints'] = dict()
+ if grant['constraints'].get('encryption_context_subset'):
+ grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset']
+ if grant['constraints'].get('encryption_context_equals'):
+ grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals']
+ return grant_params
+
+
+def different_grant(existing_grant, desired_grant):
+ if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'):
+ return True
+ if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'):
+ return True
+    if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations', [])):
+ return True
+ if existing_grant.get('constraints') != desired_grant.get('constraints'):
+ return True
+ return False
+
+
+def compare_grants(existing_grants, desired_grants, purge_grants=False):
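+    '''Work out which grants need adding and which need removing. Grants are
+    keyed by name; a grant whose settings differ appears in both lists, so
+    the old grant is retired and a replacement is created.'''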
+ existing_dict = dict((eg['name'], eg) for eg in existing_grants)
+ desired_dict = dict((dg['name'], dg) for dg in desired_grants)
+ to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys())
+ if purge_grants:
+ to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys())
+ else:
+ to_remove_keys = set()
+ to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys())
+ for candidate in to_change_candidates:
+ if different_grant(existing_dict[candidate], desired_dict[candidate]):
+ to_add_keys.add(candidate)
+ to_remove_keys.add(candidate)
+
+ to_add = []
+ to_remove = []
+ for key in to_add_keys:
+ grant = desired_dict[key]
+ to_add.append(grant)
+ for key in to_remove_keys:
+ grant = existing_dict[key]
+ to_remove.append(grant)
+ return to_add, to_remove
+
+
+def start_key_deletion(connection, module, key_metadata):
+ if key_metadata['KeyState'] == 'PendingDeletion':
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.schedule_key_deletion(KeyId=key_metadata['Arn'])
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to schedule key for deletion")
+
+
+def cancel_key_deletion(connection, module, key):
+ key_id = key['key_arn']
+ if key['key_state'] != 'PendingDeletion':
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.cancel_key_deletion(KeyId=key_id)
+ # key is disabled after deletion cancellation
+ # set this so that ensure_enabled_disabled works correctly
+ key['key_state'] = 'Disabled'
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to cancel key deletion")
+
+ return True
+
+
+def ensure_enabled_disabled(connection, module, key, enabled):
+ desired_state = 'Enabled'
+ if not enabled:
+ desired_state = 'Disabled'
+
+ if key['key_state'] == desired_state:
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ if enabled:
+ try:
+ connection.enable_key(KeyId=key_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to enable key")
+ else:
+ try:
+ connection.disable_key(KeyId=key_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to disable key")
+
+ return True
+
+
+def update_alias(connection, module, key, alias):
+ alias = canonicalize_alias_name(alias)
+
+ if alias is None:
+ return False
+
+ key_id = key['key_arn']
+ aliases = get_kms_aliases_with_backoff(connection)['Aliases']
+ # We will only add new aliases, not rename existing ones
+ if alias in [_alias['AliasName'] for _alias in aliases]:
+ return False
+
+ if not module.check_mode:
+ try:
+ connection.create_alias(TargetKeyId=key_id, AliasName=alias)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed create key alias")
+
+ return True
+
+
+def update_description(connection, module, key, description):
+ if description is None:
+ return False
+ if key['description'] == description:
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ try:
+ connection.update_key_description(KeyId=key_id, Description=description)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update key description")
+
+ return True
+
+
+def update_tags(connection, module, key, desired_tags, purge_tags):
+ # purge_tags needs to be explicitly set, so an empty tags list means remove
+ # all tags
+
+ to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags)
+ if not (bool(to_add) or bool(to_remove)):
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ if to_remove:
+ try:
+ connection.untag_resource(KeyId=key_id, TagKeys=to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to remove tag")
+ if to_add:
+ try:
+                tags = ansible_dict_to_boto3_tag_list(desired_tags, tag_name_key_name='TagKey', tag_value_key_name='TagValue')
+ connection.tag_resource(KeyId=key_id, Tags=tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to add tag to key")
+
+ return True
+
+
+def update_policy(connection, module, key, policy):
+ if policy is None:
+ return False
+ try:
+ new_policy = json.loads(policy)
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Unable to parse new policy as JSON")
+
+ key_id = key['key_arn']
+ try:
+ keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default')
+ original_policy = json.loads(keyret['Policy'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ # If we can't fetch the current policy assume we're making a change
+ # Could occur if we have PutKeyPolicy without GetKeyPolicy
+ original_policy = {}
+
+ if not compare_policies(original_policy, new_policy):
+ return False
+
+ if not module.check_mode:
+ try:
+ connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update key policy")
+
+ return True
+
+
+def update_key_rotation(connection, module, key, enable_key_rotation):
+ if enable_key_rotation is None:
+ return False
+ key_id = key['key_arn']
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+ if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation:
+ return False
+
+ if enable_key_rotation:
+ connection.enable_key_rotation(KeyId=key_id)
+ else:
+ connection.disable_key_rotation(KeyId=key_id)
+ return True
+
+
+def update_grants(connection, module, key, desired_grants, purge_grants):
+ existing_grants = key['grants']
+
+ to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants)
+ if not (bool(to_add) or bool(to_remove)):
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ for grant in to_remove:
+ try:
+ connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to retire grant")
+ for grant in to_add:
+ grant_params = convert_grant_params(grant, key)
+ try:
+ connection.create_grant(**grant_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create grant")
+
+ return True
+
+
+def update_key(connection, module, key):
+ changed = False
+
+ changed |= cancel_key_deletion(connection, module, key)
+ changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled'])
+ changed |= update_alias(connection, module, key, module.params['alias'])
+ changed |= update_description(connection, module, key, module.params['description'])
+ changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags'))
+ changed |= update_policy(connection, module, key, module.params.get('policy'))
+ changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants'))
+ changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
+
+ # make results consistent with kms_facts before returning
+ result = get_key_details(connection, module, key['key_arn'])
+ result['changed'] = changed
+ return result
+
+
+def create_key(connection, module):
+ params = dict(BypassPolicyLockoutSafetyCheck=False,
+ Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'),
+ KeyUsage='ENCRYPT_DECRYPT',
+ Origin='AWS_KMS')
+
+ if module.check_mode:
+ return {'changed': True}
+
+ if module.params.get('description'):
+ params['Description'] = module.params['description']
+ if module.params.get('policy'):
+ params['Policy'] = module.params['policy']
+
+ try:
+ result = connection.create_key(**params)['KeyMetadata']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create initial key")
+
+ key = get_key_details(connection, module, result['KeyId'])
+ update_alias(connection, module, key, module.params['alias'])
+ update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
+
+ ensure_enabled_disabled(connection, module, key, module.params.get('enabled'))
+ update_grants(connection, module, key, module.params.get('grants'), False)
+
+ # make results consistent with kms_facts
+ result = get_key_details(connection, module, key['key_id'])
+ result['changed'] = True
+ return result
+
+
+def delete_key(connection, module, key_metadata):
+ changed = False
+
+ changed |= start_key_deletion(connection, module, key_metadata)
+
+ result = get_key_details(connection, module, key_metadata['Arn'])
+ result['changed'] = changed
+ return result
+
+
+def get_arn_from_role_name(iam, rolename):
+ ret = iam.get_role(RoleName=rolename)
+ if ret.get('Role') and ret['Role'].get('Arn'):
+ return ret['Role']['Arn']
+ raise Exception('could not find arn for name {0}.'.format(rolename))
+
+
+def _clean_statement_principals(statement, clean_invalid_entries):
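+    '''Normalise statement['Principal']['AWS'] to a list and, when
+    clean_invalid_entries is set, drop entries that are not IAM ARNs.
+    Returns True if invalid entries were found and removed.'''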
+
+ # create Principal and 'AWS' so we can safely use them later.
+ if not isinstance(statement.get('Principal'), dict):
+ statement['Principal'] = dict()
+
+ # If we have a single AWS Principal, ensure we still have a list (to manipulate)
+ if 'AWS' in statement['Principal'] and isinstance(statement['Principal']['AWS'], string_types):
+ statement['Principal']['AWS'] = [statement['Principal']['AWS']]
+ if not isinstance(statement['Principal'].get('AWS'), list):
+ statement['Principal']['AWS'] = list()
+
+ invalid_entries = [item for item in statement['Principal']['AWS'] if not item.startswith('arn:aws:iam::')]
+ valid_entries = [item for item in statement['Principal']['AWS'] if item.startswith('arn:aws:iam::')]
+
+ if bool(invalid_entries) and clean_invalid_entries:
+ statement['Principal']['AWS'] = valid_entries
+ return True
+
+ return False
+
+
+def _do_statement_grant(statement, role_arn, grant_types, mode, grant_type):
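+    '''Add or remove role_arn on a single policy statement, returning the
+    change made ('add', 'remove' or None).'''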
+
+ if mode == 'grant':
+ if grant_type in grant_types:
+ if role_arn not in statement['Principal']['AWS']: # needs to be added.
+ statement['Principal']['AWS'].append(role_arn)
+ return 'add'
+        elif role_arn in statement['Principal']['AWS']: # not one of the places the role should be
+ statement['Principal']['AWS'].remove(role_arn)
+ return 'remove'
+ return None
+
+ if mode == 'deny' and role_arn in statement['Principal']['AWS']:
+ # we don't selectively deny. that's a grant with a
+ # smaller list. so deny=remove all of this arn.
+ statement['Principal']['AWS'].remove(role_arn)
+ return 'remove'
+ return None
+
+
+def do_policy_grant(module, kms, keyarn, role_arn, grant_types, mode='grant', dry_run=True, clean_invalid_entries=True):
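+    '''Grant or deny key usage to role_arn by editing the key policy. In
+    'grant' mode the role is added to the statements named in grant_types and
+    removed from the others; in 'deny' mode it is removed from all of them.'''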
+ ret = {}
+ policy = json.loads(get_key_policy_with_backoff(kms, keyarn, 'default')['Policy'])
+
+ changes_needed = {}
+ assert_policy_shape(module, policy)
+ had_invalid_entries = False
+ for statement in policy['Statement']:
+ # We already tested that these are the only types in the statements
+ for grant_type in statement_label:
+ # Are we on this grant type's statement?
+ if statement['Sid'] != statement_label[grant_type]:
+ continue
+
+ had_invalid_entries |= _clean_statement_principals(statement, clean_invalid_entries)
+ change = _do_statement_grant(statement, role_arn, grant_types, mode, grant_type)
+ if change:
+ changes_needed[grant_type] = change
+
+ ret['changes_needed'] = changes_needed
+ ret['had_invalid_entries'] = had_invalid_entries
+ ret['new_policy'] = policy
+ ret['changed'] = bool(changes_needed)
+
+ if dry_run or not ret['changed']:
+ return ret
+
+ try:
+ policy_json_string = json.dumps(policy)
+ kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update key_policy', new_policy=policy_json_string)
+
+ return ret
+
+
+def assert_policy_shape(module, policy):
+ '''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
+ errors = []
+ if policy['Version'] != "2012-10-17":
+ errors.append('Unknown version/date ({0}) of policy. Things are probably different than we assumed they were.'.format(policy['Version']))
+
+ found_statement_type = {}
+ for statement in policy['Statement']:
+ for label, sidlabel in statement_label.items():
+ if statement['Sid'] == sidlabel:
+ found_statement_type[label] = True
+
+ for statementtype in statement_label:
+ if not found_statement_type.get(statementtype):
+ errors.append('Policy is missing {0}.'.format(statementtype))
+
+ if errors:
+ module.fail_json(msg='Problems asserting policy shape. Cowardly refusing to modify it', errors=errors, policy=policy)
+
+
+def canonicalize_alias_name(alias):
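+    '''Ensure the alias carries the 'alias/' prefix expected by the KMS API.'''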
+ if alias is None:
+ return None
+ if alias.startswith('alias/'):
+ return alias
+ return 'alias/' + alias
+
+
+def fetch_key_metadata(connection, module, key_id, alias):
+
+    alias = canonicalize_alias_name(alias)
+
+ try:
+ # Fetch by key_id where possible
+ if key_id:
+ return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ # Or try alias as a backup
+ return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata']
+
+ except connection.exceptions.NotFoundException:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, 'Failed to fetch key metadata.')
+
+
+def update_policy_grants(connection, module, key_metadata, mode):
+ iam = module.client('iam')
+ key_id = key_metadata['Arn']
+
+ if module.params.get('policy_role_name') and not module.params.get('policy_role_arn'):
+ module.params['policy_role_arn'] = get_arn_from_role_name(iam, module.params['policy_role_name'])
+ if not module.params.get('policy_role_arn'):
+ module.fail_json(msg='policy_role_arn or policy_role_name is required to {0}'.format(module.params['policy_mode']))
+
+ # check the grant types for 'grant' only.
+ if mode == 'grant':
+ for grant_type in module.params['policy_grant_types']:
+ if grant_type not in statement_label:
+ module.fail_json(msg='{0} is an unknown grant type.'.format(grant_type))
+
+ return do_policy_grant(module, connection,
+ key_id,
+ module.params['policy_role_arn'],
+ module.params['policy_grant_types'],
+ mode=mode,
+ dry_run=module.check_mode,
+ clean_invalid_entries=module.params['policy_clean_invalid_entries'])
+
+
+def main():
+ argument_spec = dict(
+ alias=dict(aliases=['key_alias']),
+ policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'),
+ policy_role_name=dict(aliases=['role_name']),
+ policy_role_arn=dict(aliases=['role_arn']),
+ policy_grant_types=dict(aliases=['grant_types'], type='list', elements='str'),
+ policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True),
+ key_id=dict(aliases=['key_arn']),
+ description=dict(),
+ enabled=dict(type='bool', default=True),
+ tags=dict(type='dict', default={}),
+ purge_tags=dict(type='bool', default=False),
+ grants=dict(type='list', default=[], elements='dict'),
+ policy=dict(type='json'),
+ purge_grants=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+        enable_key_rotation=dict(type='bool')
+ )
+
+ module = AnsibleAWSModule(
+ supports_check_mode=True,
+ argument_spec=argument_spec,
+ required_one_of=[['alias', 'key_id']],
+ )
+
+ mode = module.params['policy_mode']
+
+ kms = module.client('kms')
+
+ key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias'))
+ # We can't create keys with a specific ID, if we can't access the key we'll have to fail
+ if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata:
+ module.fail_json(msg="Could not find key with id %s to update")
+
+ if module.params.get('policy_grant_types') or mode == 'deny':
+ module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
+ ' and has been deprecated in favour of the policy option.', date='2021-12-01', collection_name='community.aws')
+ result = update_policy_grants(kms, module, key_metadata, mode)
+ module.exit_json(**result)
+
+ if module.params.get('state') == 'absent':
+ if key_metadata is None:
+ module.exit_json(changed=False)
+ result = delete_key(kms, module, key_metadata)
+ module.exit_json(**result)
+
+ if key_metadata:
+ key_details = get_key_details(kms, module, key_metadata['Arn'])
+ result = update_key(kms, module, key_details)
+ module.exit_json(**result)
+
+ result = create_key(kms, module)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_facts.py
new file mode 100644
index 00000000..235b7bc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_facts.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_kms_info
+version_added: 1.0.0
+short_description: Gather information about AWS KMS keys
+description:
+ - Gather information about AWS KMS keys including tags and grants
+ - This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
+author: "Will Thames (@willthames)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ The filters aren't natively supported by boto3, but are supported to provide similar
+ functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
+ C(tag:tagName)) are available, as are C(key-id) and C(alias)
+ type: dict
+ pending_deletion:
+ description: Whether to get full details (tags, grants etc.) of keys pending deletion
+ default: False
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all KMS keys
+- community.aws.aws_kms_info:
+
+# Gather information about all keys with a Name tag
+- community.aws.aws_kms_info:
+ filters:
+ tag-key: Name
+
+# Gather information about all keys with a specific name
+- community.aws.aws_kms_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+keys:
+ description: list of keys
+ type: complex
+ returned: always
+ contains:
+ key_id:
+ description: ID of key
+ type: str
+ returned: always
+ sample: abcd1234-abcd-1234-5678-ef1234567890
+ key_arn:
+ description: ARN of key
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ key_state:
+ description: The state of the key
+ type: str
+ returned: always
+ sample: PendingDeletion
+ key_usage:
+ description: The cryptographic operations for which you can use the key.
+ type: str
+ returned: always
+ sample: ENCRYPT_DECRYPT
+ origin:
+ description:
+ The source of the key's key material. When this value is C(AWS_KMS),
+ AWS KMS created the key material. When this value is C(EXTERNAL), the
+ key material was imported or the CMK lacks key material.
+ type: str
+ returned: always
+ sample: AWS_KMS
+ aws_account_id:
+ description: The AWS Account ID that the key belongs to
+ type: str
+ returned: always
+ sample: 1234567890123
+ creation_date:
+ description: Date of creation of the key
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08.551000+10:00"
+ description:
+ description: Description of the key
+ type: str
+ returned: always
+ sample: "My Key for Protecting important stuff"
+ enabled:
+      description: Whether the key is enabled. True if C(key_state) is C(Enabled).
+      type: bool
+ returned: always
+ sample: false
+ enable_key_rotation:
+      description: Whether automatic annual key rotation is enabled. Returns None if the key rotation status can't be determined.
+ type: bool
+ returned: always
+ sample: false
+ aliases:
+ description: list of aliases associated with the key
+ type: list
+ returned: always
+ sample:
+ - aws/acm
+ - aws/ebs
+ tags:
+ description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
+ type: dict
+ returned: always
+ sample:
+ Name: myKey
+ Purpose: protecting_stuff
+ policies:
+ description: list of policy documents for the keys. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "111111111111"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::111111111111:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+ grants:
+ description: list of grants associated with a key
+ type: complex
+ returned: always
+ contains:
+ constraints:
+ description: Constraints on the encryption context that the grant allows.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
+ type: dict
+ returned: always
+ sample:
+ encryption_context_equals:
+ "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
+ creation_date:
+ description: Date of creation of the grant
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08+10:00"
+ grant_id:
+ description: The unique ID for the grant
+ type: str
+ returned: always
+ sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
+ grantee_principal:
+ description: The principal that receives the grant's permissions
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+ issuing_account:
+ description: The AWS account under which the grant was issued
+ type: str
+ returned: always
+ sample: arn:aws:iam::01234567890:root
+ key_id:
+ description: The key ARN to which the grant applies.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ name:
+ description: The friendly name that identifies the grant
+ type: str
+ returned: always
+ sample: xyz
+ operations:
+ description: The list of operations permitted by the grant
+ type: list
+ returned: always
+ sample:
+ - Decrypt
+ - RetireGrant
+ retiring_principal:
+ description: The principal that can retire the grant
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+'''
+
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+# Caching lookup for aliases
+_aliases = dict()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_keys_with_backoff(connection):
+ paginator = connection.get_paginator('list_keys')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_aliases_with_backoff(connection):
+ paginator = connection.get_paginator('list_aliases')
+ return paginator.paginate().build_full_result()
+
+
+def get_kms_aliases_lookup(connection):
+ if not _aliases:
+ for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
+ # Not all aliases are actually associated with a key
+ if 'TargetKeyId' in alias:
+ # strip off leading 'alias/' and add it to key's aliases
+ if alias['TargetKeyId'] in _aliases:
+ _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
+ else:
+ _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
+ return _aliases
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_tags_with_backoff(connection, key_id, **kwargs):
+ return connection.list_resource_tags(KeyId=key_id, **kwargs)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_grants_with_backoff(connection, key_id, **kwargs):
+ params = dict(KeyId=key_id)
+ if kwargs.get('tokens'):
+ params['GrantTokens'] = kwargs['tokens']
+ paginator = connection.get_paginator('list_grants')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_metadata_with_backoff(connection, key_id):
+ return connection.describe_key(KeyId=key_id)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_key_policies_with_backoff(connection, key_id):
+ paginator = connection.get_paginator('list_key_policies')
+ return paginator.paginate(KeyId=key_id).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_key_policy_with_backoff(connection, key_id, policy_name):
+ return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_enable_key_rotation_with_backoff(connection, key_id):
+ try:
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+    except is_boto3_error_code('AccessDeniedException'):
+ return None
+
+ return current_rotation_status.get('KeyRotationEnabled')
+
+
+def get_kms_tags(connection, module, key_id):
+ # Handle pagination here as list_resource_tags does not have
+ # a paginator
+ kwargs = {}
+ tags = []
+ more = True
+ while more:
+ try:
+ tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
+ tags.extend(tag_response['Tags'])
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] != 'AccessDeniedException':
+ module.fail_json(msg="Failed to obtain key tags",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ else:
+ tag_response = {}
+ if tag_response.get('NextMarker'):
+ kwargs['Marker'] = tag_response['NextMarker']
+ else:
+ more = False
+ return tags
+
+
+def get_kms_policies(connection, module, key_id):
+ try:
+ policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
+ return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
+ policy in policies]
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] != 'AccessDeniedException':
+ module.fail_json(msg="Failed to obtain key policies",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ else:
+ return []
+
+
+def key_matches_filter(key, filtr):
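+    # filtr is a single (name, value) pair taken from the filters dict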
+ if filtr[0] == 'key-id':
+ return filtr[1] == key['key_id']
+ if filtr[0] == 'tag-key':
+ return filtr[1] in key['tags']
+ if filtr[0] == 'tag-value':
+ return filtr[1] in key['tags'].values()
+ if filtr[0] == 'alias':
+ return filtr[1] in key['aliases']
+ if filtr[0].startswith('tag:'):
+ return key['tags'][filtr[0][4:]] == filtr[1]
+
+
+def key_matches_filters(key, filters):
+ if not filters:
+ return True
+ else:
+ return all([key_matches_filter(key, filtr) for filtr in filters.items()])
+
+
+def get_key_details(connection, module, key_id, tokens=None):
+ if not tokens:
+ tokens = []
+ try:
+ result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain key metadata",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ result['KeyArn'] = result.pop('Arn')
+
+ try:
+ aliases = get_kms_aliases_lookup(connection)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain aliases",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ result['aliases'] = aliases.get(result['KeyId'], [])
+
+ if result['Origin'] == 'AWS_KMS':
+ result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
+ else:
+ result['enable_key_rotation'] = None
+
+    if result['KeyState'] == 'PendingDeletion' and not module.params.get('pending_deletion'):
+        return camel_dict_to_snake_dict(result)
+
+ try:
+ result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain key grants",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ tags = get_kms_tags(connection, module, key_id)
+
+ result = camel_dict_to_snake_dict(result)
+ result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
+ result['policies'] = get_kms_policies(connection, module, key_id)
+ return result
+
+
+def get_kms_info(connection, module):
+ try:
+ keys = get_kms_keys_with_backoff(connection)['Keys']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain keys",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ return [get_key_details(connection, module, key['KeyId']) for key in keys]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict'),
+ pending_deletion=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'aws_kms_facts':
+ module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('kms')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ all_keys = get_kms_info(connection, module)
+ module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_info.py
new file mode 100644
index 00000000..235b7bc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_kms_info.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_kms_info
+version_added: 1.0.0
+short_description: Gather information about AWS KMS keys
+description:
+ - Gather information about AWS KMS keys including tags and grants
+ - This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
+author: "Will Thames (@willthames)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ The filters aren't natively supported by boto3, but are supported to provide similar
+ functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
+ C(tag:tagName)) are available, as are C(key-id) and C(alias)
+ type: dict
+ pending_deletion:
+ description: Whether to get full details (tags, grants etc.) of keys pending deletion
+ default: False
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all KMS keys
+- community.aws.aws_kms_info:
+
+# Gather information about all keys with a Name tag
+- community.aws.aws_kms_info:
+ filters:
+ tag-key: Name
+
+# Gather information about all keys with a specific name
+- community.aws.aws_kms_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+keys:
+ description: list of keys
+ type: complex
+ returned: always
+ contains:
+ key_id:
+ description: ID of key
+ type: str
+ returned: always
+ sample: abcd1234-abcd-1234-5678-ef1234567890
+ key_arn:
+ description: ARN of key
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ key_state:
+ description: The state of the key
+ type: str
+ returned: always
+ sample: PendingDeletion
+ key_usage:
+ description: The cryptographic operations for which you can use the key.
+ type: str
+ returned: always
+ sample: ENCRYPT_DECRYPT
+ origin:
+ description:
+ The source of the key's key material. When this value is C(AWS_KMS),
+ AWS KMS created the key material. When this value is C(EXTERNAL), the
+ key material was imported or the CMK lacks key material.
+ type: str
+ returned: always
+ sample: AWS_KMS
+ aws_account_id:
+ description: The AWS Account ID that the key belongs to
+ type: str
+ returned: always
+ sample: 1234567890123
+ creation_date:
+ description: Date of creation of the key
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08.551000+10:00"
+ description:
+ description: Description of the key
+ type: str
+ returned: always
+ sample: "My Key for Protecting important stuff"
+ enabled:
+      description: Whether the key is enabled. True if C(key_state) is C(Enabled).
+      type: bool
+ returned: always
+ sample: false
+ enable_key_rotation:
+      description: Whether automatic annual key rotation is enabled. Returns None if the key rotation status can't be determined.
+ type: bool
+ returned: always
+ sample: false
+ aliases:
+ description: list of aliases associated with the key
+ type: list
+ returned: always
+ sample:
+ - aws/acm
+ - aws/ebs
+ tags:
+ description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
+ type: dict
+ returned: always
+ sample:
+ Name: myKey
+ Purpose: protecting_stuff
+ policies:
+ description: list of policy documents for the keys. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "111111111111"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::111111111111:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+ grants:
+ description: list of grants associated with a key
+ type: complex
+ returned: always
+ contains:
+ constraints:
+ description: Constraints on the encryption context that the grant allows.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
+ type: dict
+ returned: always
+ sample:
+ encryption_context_equals:
+ "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
+ creation_date:
+ description: Date of creation of the grant
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08+10:00"
+ grant_id:
+ description: The unique ID for the grant
+ type: str
+ returned: always
+ sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
+ grantee_principal:
+ description: The principal that receives the grant's permissions
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+ issuing_account:
+ description: The AWS account under which the grant was issued
+ type: str
+ returned: always
+ sample: arn:aws:iam::01234567890:root
+ key_id:
+ description: The key ARN to which the grant applies.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ name:
+ description: The friendly name that identifies the grant
+ type: str
+ returned: always
+ sample: xyz
+ operations:
+ description: The list of operations permitted by the grant
+ type: list
+ returned: always
+ sample:
+ - Decrypt
+ - RetireGrant
+ retiring_principal:
+ description: The principal that can retire the grant
+ type: str
+ returned: always
+ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
+'''
+
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+# Caching lookup for aliases
+_aliases = dict()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_keys_with_backoff(connection):
+ paginator = connection.get_paginator('list_keys')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_aliases_with_backoff(connection):
+ paginator = connection.get_paginator('list_aliases')
+ return paginator.paginate().build_full_result()
+
+
+def get_kms_aliases_lookup(connection):
+ if not _aliases:
+ for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
+ # Not all aliases are actually associated with a key
+ if 'TargetKeyId' in alias:
+ # strip off leading 'alias/' and add it to key's aliases
+ if alias['TargetKeyId'] in _aliases:
+ _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
+ else:
+ _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
+ return _aliases
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_tags_with_backoff(connection, key_id, **kwargs):
+ return connection.list_resource_tags(KeyId=key_id, **kwargs)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_grants_with_backoff(connection, key_id, **kwargs):
+ params = dict(KeyId=key_id)
+ if kwargs.get('tokens'):
+ params['GrantTokens'] = kwargs['tokens']
+ paginator = connection.get_paginator('list_grants')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_kms_metadata_with_backoff(connection, key_id):
+ return connection.describe_key(KeyId=key_id)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_key_policies_with_backoff(connection, key_id):
+ paginator = connection.get_paginator('list_key_policies')
+ return paginator.paginate(KeyId=key_id).build_full_result()
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_key_policy_with_backoff(connection, key_id, policy_name):
+ return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_enable_key_rotation_with_backoff(connection, key_id):
+ try:
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+    except is_boto3_error_code('AccessDeniedException'):
+ return None
+
+ return current_rotation_status.get('KeyRotationEnabled')
+
+
+def get_kms_tags(connection, module, key_id):
+ # Handle pagination here as list_resource_tags does not have
+ # a paginator
+ kwargs = {}
+ tags = []
+ more = True
+ while more:
+ try:
+ tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
+ tags.extend(tag_response['Tags'])
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] != 'AccessDeniedException':
+ module.fail_json(msg="Failed to obtain key tags",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ else:
+ tag_response = {}
+ if tag_response.get('NextMarker'):
+ kwargs['Marker'] = tag_response['NextMarker']
+ else:
+ more = False
+ return tags
+
+
+def get_kms_policies(connection, module, key_id):
+ try:
+ policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
+ return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
+ policy in policies]
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] != 'AccessDeniedException':
+ module.fail_json(msg="Failed to obtain key policies",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ else:
+ return []
+
+
+def key_matches_filter(key, filtr):
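+    # filtr is a single (name, value) pair taken from the filters dict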
+ if filtr[0] == 'key-id':
+ return filtr[1] == key['key_id']
+ if filtr[0] == 'tag-key':
+ return filtr[1] in key['tags']
+ if filtr[0] == 'tag-value':
+ return filtr[1] in key['tags'].values()
+ if filtr[0] == 'alias':
+ return filtr[1] in key['aliases']
+ if filtr[0].startswith('tag:'):
+ return key['tags'][filtr[0][4:]] == filtr[1]
+
+
+def key_matches_filters(key, filters):
+ if not filters:
+ return True
+ else:
+ return all([key_matches_filter(key, filtr) for filtr in filters.items()])
+
+
+def get_key_details(connection, module, key_id, tokens=None):
+ if not tokens:
+ tokens = []
+ try:
+ result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain key metadata",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ result['KeyArn'] = result.pop('Arn')
+
+ try:
+ aliases = get_kms_aliases_lookup(connection)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain aliases",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ result['aliases'] = aliases.get(result['KeyId'], [])
+
+ if result['Origin'] == 'AWS_KMS':
+ result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
+ else:
+ result['enable_key_rotation'] = None
+
+    if result['KeyState'] == 'PendingDeletion' and not module.params.get('pending_deletion'):
+        return camel_dict_to_snake_dict(result)
+
+ try:
+ result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain key grants",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ tags = get_kms_tags(connection, module, key_id)
+
+ result = camel_dict_to_snake_dict(result)
+ result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
+ result['policies'] = get_kms_policies(connection, module, key_id)
+ return result
+
+
+def get_kms_info(connection, module):
+ try:
+ keys = get_kms_keys_with_backoff(connection)['Keys']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to obtain keys",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ return [get_key_details(connection, module, key['KeyId']) for key in keys]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict'),
+ pending_deletion=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'aws_kms_facts':
+ module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('kms')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ all_keys = get_kms_info(connection, module)
+ module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_facts.py
new file mode 100644
index 00000000..d0b74e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_facts.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_region_info
+short_description: Gather information about AWS regions.
+version_added: 1.0.0
+description:
+ - Gather information about AWS regions.
+ - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change.
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all regions
+- community.aws.aws_region_info:
+
+# Gather information about a single region
+- community.aws.aws_region_info:
+ filters:
+ region-name: eu-west-1
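+
+# The same filter written with an underscore instead of a dash
+# (both spellings are accepted; underscores are converted before the API call)
+- community.aws.aws_region_info:
+    filters:
+      region_name: eu-west-1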
+'''
+
+RETURN = '''
+regions:
+ returned: on success
+ description: >
+ Regions that match the provided filters. Each element consists of a dict with all the information related
+ to that region.
+ type: list
+ sample: "[{
+ 'endpoint': 'ec2.us-west-1.amazonaws.com',
+ 'region_name': 'us-west-1'
+ }]"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_region_facts':
+ module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
+
+ try:
+ regions = connection.describe_regions(
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe regions.")
+
+ module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_info.py
new file mode 100644
index 00000000..d0b74e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_region_info.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_region_info
+short_description: Gather information about AWS regions.
+version_added: 1.0.0
+description:
+ - Gather information about AWS regions.
+ - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change.
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all regions
+- community.aws.aws_region_info:
+
+# Gather information about a single region
+- community.aws.aws_region_info:
+ filters:
+ region-name: eu-west-1
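+
+# The same filter written with an underscore instead of a dash
+# (both spellings are accepted; underscores are converted before the API call)
+- community.aws.aws_region_info:
+    filters:
+      region_name: eu-west-1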
+'''
+
+RETURN = '''
+regions:
+ returned: on success
+ description: >
+ Regions that match the provided filters. Each element consists of a dict with all the information related
+ to that region.
+ type: list
+ sample: "[{
+ 'endpoint': 'ec2.us-west-1.amazonaws.com',
+ 'region_name': 'us-west-1'
+ }]"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_region_facts':
+ module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
+
+ try:
+ regions = connection.describe_regions(
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe regions.")
+
+ module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_facts.py
new file mode 100644
index 00000000..cd8b81f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3_bucket_info
+version_added: 1.0.0
+short_description: Lists S3 buckets in AWS
+requirements:
+ - boto3 >= 1.4.4
+ - python >= 2.6
+description:
+ - Lists S3 buckets in AWS
+ - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.aws_s3_bucket_info) module no longer returns C(ansible_facts)!
+author: "Gerben Geijteman (@hyperized)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Note: Only AWS S3 is currently supported
+
+# List all S3 buckets
+- community.aws.aws_s3_bucket_info:
+ register: result
+
+- name: List buckets
+ ansible.builtin.debug:
+ msg: "{{ result['buckets'] }}"
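+
+# A minimal follow-up sketch (not part of the original examples): extract just
+# the bucket names from the registered result above
+- name: List bucket names only
+  ansible.builtin.debug:
+    msg: "{{ result.buckets | map(attribute='name') | list }}"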
+'''
+
+RETURN = '''
+buckets:
+ description: "List of buckets"
+ returned: always
+ sample:
+ - creation_date: '2017-07-06 15:05:12 +00:00'
+ name: my_bucket
+ type: list
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_bucket_list(module, connection):
+ """
+    Return the 'buckets' list from list_buckets, with keys converted to snake_case
+ :param module:
+ :param connection:
+ :return:
+ """
+ try:
+ buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ return buckets
+
+
+def main():
+ """
+ Get list of S3 buckets
+ :return:
+ """
+
+ # Ensure we have an empty dict
+ result = {}
+
+ # Including ec2 argument spec
+ module = AnsibleAWSModule(argument_spec={}, supports_check_mode=True)
+ is_old_facts = module._name == 'aws_s3_bucket_facts'
+ if is_old_facts:
+ module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ # Set up connection
+ try:
+ connection = module.client('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Gather results
+ result['buckets'] = get_bucket_list(module, connection)
+
+ # Send exit
+ if is_old_facts:
+ module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
+ else:
+ module.exit_json(msg="Retrieved s3 info.", **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_info.py
new file mode 100644
index 00000000..cd8b81f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_bucket_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3_bucket_info
+version_added: 1.0.0
+short_description: Lists S3 buckets in AWS
+requirements:
+ - boto3 >= 1.4.4
+ - python >= 2.6
+description:
+ - Lists S3 buckets in AWS
+ - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.aws_s3_bucket_info) module no longer returns C(ansible_facts)!
+author: "Gerben Geijteman (@hyperized)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Note: Only AWS S3 is currently supported
+
+# List all S3 buckets
+- community.aws.aws_s3_bucket_info:
+ register: result
+
+- name: List buckets
+ ansible.builtin.debug:
+ msg: "{{ result['buckets'] }}"
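+
+# A minimal follow-up sketch (not part of the original examples): extract just
+# the bucket names from the registered result above
+- name: List bucket names only
+  ansible.builtin.debug:
+    msg: "{{ result.buckets | map(attribute='name') | list }}"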
+'''
+
+RETURN = '''
+buckets:
+ description: "List of buckets"
+ returned: always
+ sample:
+ - creation_date: '2017-07-06 15:05:12 +00:00'
+ name: my_bucket
+ type: list
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_bucket_list(module, connection):
+ """
+    Return the 'buckets' list from list_buckets, with keys converted to snake_case
+ :param module:
+ :param connection:
+ :return:
+ """
+ try:
+ buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ return buckets
+
+
+def main():
+ """
+ Get list of S3 buckets
+ :return:
+ """
+
+ # Ensure we have an empty dict
+ result = {}
+
+ # Including ec2 argument spec
+ module = AnsibleAWSModule(argument_spec={}, supports_check_mode=True)
+ is_old_facts = module._name == 'aws_s3_bucket_facts'
+ if is_old_facts:
+ module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ # Set up connection
+ try:
+ connection = module.client('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Gather results
+ result['buckets'] = get_bucket_list(module, connection)
+
+ # Send exit
+ if is_old_facts:
+ module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
+ else:
+ module.exit_json(msg="Retrieved s3 info.", **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_cors.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_cors.py
new file mode 100644
index 00000000..820530dc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_s3_cors.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_s3_cors
+version_added: 1.0.0
+short_description: Manage CORS for S3 buckets in AWS
+description:
+ - Manage CORS for S3 buckets in AWS
+author: "Oyvind Saltvik (@fivethreeo)"
+options:
+ name:
+ description:
+      - Name of the S3 bucket.
+ required: true
+ type: str
+ rules:
+ description:
+      - CORS rules to put on the S3 bucket.
+ type: list
+    elements: dict
+ state:
+ description:
+      - Create or remove CORS rules on the S3 bucket.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple CORS configuration for an S3 bucket
+- community.aws.aws_s3_cors:
+ name: mys3bucket
+ state: present
+ rules:
+ - allowed_origins:
+ - http://www.example.com/
+ allowed_methods:
+ - GET
+ - POST
+ allowed_headers:
+ - Authorization
+ expose_headers:
+ - x-amz-server-side-encryption
+ - x-amz-request-id
+ max_age_seconds: 30000
+
+# Remove CORS configuration from an S3 bucket
+- community.aws.aws_s3_cors:
+ name: mys3bucket
+ state: absent
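+
+# A hedged sketch mirroring the wildcard rule shown in the return sample below:
+# allow any origin to GET objects from the bucket
+- community.aws.aws_s3_cors:
+    name: mys3bucket
+    state: present
+    rules:
+      - allowed_origins:
+          - '*'
+        allowed_methods:
+          - GET
+        max_age_seconds: 30000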
+'''
+
+RETURN = r'''
+changed:
+ description: check to see if a change was made to the rules
+ returned: always
+ type: bool
+ sample: true
+name:
+ description: name of bucket
+ returned: always
+ type: str
+ sample: 'bucket-name'
+rules:
+ description: list of current rules
+ returned: always
+ type: list
+ sample: [
+ {
+ "allowed_headers": [
+ "Authorization"
+ ],
+ "allowed_methods": [
+ "GET"
+ ],
+ "allowed_origins": [
+ "*"
+ ],
+ "max_age_seconds": 30000
+ }
+ ]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
+
+
+def create_or_update_bucket_cors(connection, module):
+
+ name = module.params.get("name")
+ rules = module.params.get("rules", [])
+ changed = False
+
+ try:
+ current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
+ except ClientError:
+ current_camel_rules = []
+
+ new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)
+    # compare_policies() converts both rule documents to a hashable form and returns True if they differ
+ if compare_policies(new_camel_rules, current_camel_rules):
+ changed = True
+
+ if changed:
+ try:
+            connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))
+
+ module.exit_json(changed=changed, name=name, rules=rules)
+
+
+def destroy_bucket_cors(connection, module):
+
+ name = module.params.get("name")
+ changed = False
+
+ try:
+        connection.delete_bucket_cors(Bucket=name)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+        rules=dict(type='list', elements='dict'),
+ state=dict(type='str', choices=['present', 'absent'], required=True)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ client = module.client('s3')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_bucket_cors(client, module)
+ elif state == 'absent':
+ destroy_bucket_cors(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_secret.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_secret.py
new file mode 100644
index 00000000..962501d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_secret.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, REY Remi
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: aws_secret
+version_added: 1.0.0
+short_description: Manage secrets stored in AWS Secrets Manager.
+description:
+ - Create, update, and delete secrets stored in AWS Secrets Manager.
+author: "REY Remi (@rrey)"
+requirements: [ 'botocore>=1.10.0', 'boto3' ]
+options:
+ name:
+ description:
+ - Friendly name for the secret you are creating.
+ required: true
+ type: str
+ state:
+ description:
+      - Whether the secret should exist or not.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ recovery_window:
+ description:
+ - Only used if state is absent.
+ - Specifies the number of days that Secrets Manager waits before it can delete the secret.
+ - If set to 0, the deletion is forced without recovery.
+ default: 30
+ type: int
+ description:
+ description:
+ - Specifies a user-provided description of the secret.
+ type: str
+ kms_key_id:
+ description:
+ - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
+        used to encrypt the C(secret_string) or C(secret_binary) values in the versions stored in this secret.
+ type: str
+ secret_type:
+ description:
+ - Specifies the type of data that you want to encrypt.
+ choices: ['binary', 'string']
+ default: 'string'
+ type: str
+ secret:
+ description:
+ - Specifies string or binary data that you want to encrypt and store in the new version of the secret.
+ default: ""
+ type: str
+ tags:
+ description:
+ - Specifies a list of user-defined tags that are attached to the secret.
+ type: dict
+ rotation_lambda:
+ description:
+ - Specifies the ARN of the Lambda function that can rotate the secret.
+ type: str
+ rotation_interval:
+ description:
+ - Specifies the number of days between automatic scheduled rotations of the secret.
+ default: 30
+ type: int
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+
+EXAMPLES = r'''
+- name: Add string to AWS Secrets Manager
+ community.aws.aws_secret:
+ name: 'test_secret_string'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+
+- name: Remove string from AWS Secrets Manager
+ community.aws.aws_secret:
+ name: 'test_secret_string'
+ state: absent
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
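+
+# A hedged sketch of the rotation options documented above; the Lambda ARN is a
+# placeholder and must point at a real rotation function
+- name: Add string to AWS Secrets Manager with automatic rotation
+  community.aws.aws_secret:
+    name: 'test_secret_string'
+    state: present
+    secret_type: 'string'
+    secret: "{{ super_secret_string }}"
+    rotation_lambda: "arn:aws:lambda:us-east-1:123456789012:function:my-rotation-function"
+    rotation_interval: 29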
+'''
+
+
+RETURN = r'''
+secret:
+ description: The secret information
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The ARN of the secret
+ returned: always
+ type: str
+ sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
+ last_accessed_date:
+ description: The date the secret was last accessed
+ returned: always
+ type: str
+ sample: '2018-11-20T01:00:00+01:00'
+ last_changed_date:
+ description: The date the secret was last modified.
+ returned: always
+ type: str
+ sample: '2018-11-20T12:16:38.433000+01:00'
+ name:
+ description: The secret name.
+ returned: always
+ type: str
+ sample: my_secret
+ rotation_enabled:
+ description: The secret rotation status.
+ returned: always
+ type: bool
+ sample: false
+ version_ids_to_stages:
+ description: Provide the secret version ids and the associated secret stage.
+ returned: always
+ type: dict
+ sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
+'''
+
+from ansible.module_utils._text import to_bytes
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+class Secret(object):
+ """An object representation of the Secret described by the self.module args"""
+ def __init__(self, name, secret_type, secret, description="", kms_key_id=None,
+ tags=None, lambda_arn=None, rotation_interval=None):
+ self.name = name
+ self.description = description
+ self.kms_key_id = kms_key_id
+ if secret_type == "binary":
+ self.secret_type = "SecretBinary"
+ else:
+ self.secret_type = "SecretString"
+ self.secret = secret
+ self.tags = tags or {}
+ self.rotation_enabled = False
+ if lambda_arn:
+ self.rotation_enabled = True
+ self.rotation_lambda_arn = lambda_arn
+ self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}
+
+ @property
+ def create_args(self):
+ args = {
+ "Name": self.name
+ }
+ if self.description:
+ args["Description"] = self.description
+ if self.kms_key_id:
+ args["KmsKeyId"] = self.kms_key_id
+ if self.tags:
+ args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
+ args[self.secret_type] = self.secret
+ return args
+
+ @property
+ def update_args(self):
+ args = {
+ "SecretId": self.name
+ }
+ if self.description:
+ args["Description"] = self.description
+ if self.kms_key_id:
+ args["KmsKeyId"] = self.kms_key_id
+ args[self.secret_type] = self.secret
+ return args
+
+ @property
+ def boto3_tags(self):
+        return ansible_dict_to_boto3_tag_list(self.tags)
+
+ def as_dict(self):
+ result = self.__dict__
+ result.pop("tags")
+ return snake_dict_to_camel_dict(result)
+
+
+class SecretsManagerInterface(object):
+ """An interface with SecretsManager"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = self.module.client('secretsmanager')
+
+ def get_secret(self, name):
+ try:
+ secret = self.client.describe_secret(SecretId=name)
+ except self.client.exceptions.ResourceNotFoundException:
+ secret = None
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Failed to describe secret")
+ return secret
+
+ def create_secret(self, secret):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ created_secret = self.client.create_secret(**secret.create_args)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create secret")
+
+ if secret.rotation_enabled:
+ response = self.update_rotation(secret)
+ created_secret["VersionId"] = response.get("VersionId")
+ return created_secret
+
+ def update_secret(self, secret):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ try:
+ response = self.client.update_secret(**secret.update_args)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to update secret")
+ return response
+
+ def restore_secret(self, name):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ response = self.client.restore_secret(SecretId=name)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to restore secret")
+ return response
+
+ def delete_secret(self, name, recovery_window):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ if recovery_window == 0:
+ response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
+ else:
+ response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to delete secret")
+ return response
+
+ def update_rotation(self, secret):
+ if secret.rotation_enabled:
+ try:
+ response = self.client.rotate_secret(
+ SecretId=secret.name,
+ RotationLambdaARN=secret.rotation_lambda_arn,
+ RotationRules=secret.rotation_rules)
+ except (BotoCoreError, ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to rotate secret")
+ else:
+ try:
+ response = self.client.cancel_rotate_secret(SecretId=secret.name)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to cancel rotation")
+ return response
+
+ def tag_secret(self, secret_name, tags):
+ try:
+ self.client.tag_resource(SecretId=secret_name, Tags=tags)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")
+
+ def untag_secret(self, secret_name, tag_keys):
+ try:
+ self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")
+
+ def secrets_match(self, desired_secret, current_secret):
+ """Compare secrets except tags and rotation
+
+ Args:
+            desired_secret: Secret object representing the desired secret state.
+ current_secret: secret reference as returned by the secretsmanager api.
+
+ Returns: bool
+ """
+ if desired_secret.description != current_secret.get("Description", ""):
+ return False
+ if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
+ return False
+ current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
+ if desired_secret.secret_type == 'SecretBinary':
+ desired_value = to_bytes(desired_secret.secret)
+ else:
+ desired_value = desired_secret.secret
+ if desired_value != current_secret_value.get(desired_secret.secret_type):
+ return False
+ return True
+
+
+def rotation_match(desired_secret, current_secret):
+ """Compare secrets rotation configuration
+
+ Args:
+        desired_secret: Secret object representing the desired secret state.
+ current_secret: secret reference as returned by the secretsmanager api.
+
+ Returns: bool
+ """
+ if desired_secret.rotation_enabled != current_secret.get("RotationEnabled", False):
+ return False
+ if desired_secret.rotation_enabled:
+ if desired_secret.rotation_lambda_arn != current_secret.get("RotationLambdaARN"):
+ return False
+ if desired_secret.rotation_rules != current_secret.get("RotationRules"):
+ return False
+ return True
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(required=True),
+ 'state': dict(choices=['present', 'absent'], default='present'),
+ 'description': dict(default=""),
+ 'kms_key_id': dict(),
+ 'secret_type': dict(choices=['binary', 'string'], default="string"),
+ 'secret': dict(default=""),
+ 'tags': dict(type='dict', default={}),
+ 'rotation_lambda': dict(),
+ 'rotation_interval': dict(type='int', default=30),
+ 'recovery_window': dict(type='int', default=30),
+ },
+ supports_check_mode=True,
+ )
+
+ changed = False
+ state = module.params.get('state')
+ secrets_mgr = SecretsManagerInterface(module)
+ recovery_window = module.params.get('recovery_window')
+ secret = Secret(
+ module.params.get('name'),
+ module.params.get('secret_type'),
+ module.params.get('secret'),
+ description=module.params.get('description'),
+ kms_key_id=module.params.get('kms_key_id'),
+ tags=module.params.get('tags'),
+ lambda_arn=module.params.get('rotation_lambda'),
+ rotation_interval=module.params.get('rotation_interval')
+ )
+
+ current_secret = secrets_mgr.get_secret(secret.name)
+
+ if state == 'absent':
+ if current_secret:
+ if not current_secret.get("DeletedDate"):
+ result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ changed = True
+ elif current_secret.get("DeletedDate") and recovery_window == 0:
+ result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ changed = True
+ else:
+ result = "secret does not exist"
+ if state == 'present':
+ if current_secret is None:
+ result = secrets_mgr.create_secret(secret)
+ changed = True
+ else:
+ if current_secret.get("DeletedDate"):
+ secrets_mgr.restore_secret(secret.name)
+ changed = True
+ if not secrets_mgr.secrets_match(secret, current_secret):
+ result = secrets_mgr.update_secret(secret)
+ changed = True
+ if not rotation_match(secret, current_secret):
+ result = secrets_mgr.update_rotation(secret)
+ changed = True
+ current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', []))
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags)
+ if tags_to_add:
+ secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
+ changed = True
+ if tags_to_remove:
+ secrets_mgr.untag_secret(secret.name, tags_to_remove)
+ changed = True
+ result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
+ result.pop("response_metadata")
+ module.exit_json(changed=changed, secret=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity.py
new file mode 100644
index 00000000..d3c88156
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_ses_identity
+version_added: 1.0.0
+short_description: Manages SES email and domain identity
+description:
+ - This module allows the user to manage verified email and domain identity for SES.
+ - This covers verifying and removing identities as well as setting up complaint, bounce
+ and delivery notification settings.
+author: Ed Costello (@orthanc)
+
+options:
+ identity:
+ description:
+ - This is the email address or domain to verify / delete.
+ - If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
+ required: true
+ type: str
+ state:
+    description: Whether to create (or update) or delete the identity.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ bounce_notifications:
+ description:
+      - Set up the SNS topic used to report bounce notifications.
+      - If omitted, bounce notifications will not be delivered to an SNS topic.
+      - If bounce notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+ default: No
+ type: dict
+ complaint_notifications:
+ description:
+      - Set up the SNS topic used to report complaint notifications.
+      - If omitted, complaint notifications will not be delivered to an SNS topic.
+      - If complaint notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+ default: No
+ type: dict
+ delivery_notifications:
+ description:
+      - Set up the SNS topic used to report delivery notifications.
+      - If omitted, delivery notifications will not be delivered to an SNS topic.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+ default: No
+ type: dict
+ feedback_forwarding:
+ description:
+ - Whether or not to enable feedback forwarding.
+ - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
+ type: 'bool'
+ default: True
+requirements: [ 'botocore', 'boto3' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Ensure example@example.com email identity exists
+ community.aws.aws_ses_identity:
+ identity: example@example.com
+ state: present
+
+- name: Delete example@example.com email identity
+ community.aws.aws_ses_identity:
+    identity: example@example.com
+ state: absent
+
+- name: Ensure example.com domain identity exists
+ community.aws.aws_ses_identity:
+ identity: example.com
+ state: present
+
+# Create an SNS topic and send bounce and complaint notifications to it
+# instead of emailing the identity owner
+- name: Ensure complaints-topic exists
+ community.aws.sns_topic:
+ name: "complaints-topic"
+ state: present
+ purge_subscriptions: False
+ register: topic_info
+
+- name: Deliver feedback to topic instead of owner email
+ community.aws.aws_ses_identity:
+ identity: example@example.com
+ state: present
+ complaint_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ include_headers: True
+ bounce_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ include_headers: False
+ feedback_forwarding: False
+
+# Create an SNS topic for delivery notifications and leave complaints
+# being forwarded to the identity owner email
+- name: Ensure delivery-notifications-topic exists
+ community.aws.sns_topic:
+ name: "delivery-notifications-topic"
+ state: present
+ purge_subscriptions: False
+ register: topic_info
+
+- name: Delivery notifications to topic
+ community.aws.aws_ses_identity:
+ identity: example@example.com
+ state: present
+ delivery_notifications:
+ topic: "{{ topic_info.sns_arn }}"
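+
+# An illustrative variation on the example above (not from the original docs):
+# also include the message headers in the delivery notifications
+- name: Delivery notifications to topic with headers
+  community.aws.aws_ses_identity:
+    identity: example@example.com
+    state: present
+    delivery_notifications:
+      topic: "{{ topic_info.sns_arn }}"
+      include_headers: True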
+'''
+
+RETURN = '''
+identity:
+ description: The identity being modified.
+ returned: success
+ type: str
+ sample: example@example.com
+identity_arn:
+ description: The arn of the identity being modified.
+ returned: success
+ type: str
+ sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
+verification_attributes:
+ description: The verification information for the identity.
+ returned: success
+ type: complex
+ sample: {
+ "verification_status": "Pending",
+ "verification_token": "...."
+ }
+ contains:
+ verification_status:
+ description: The verification status of the identity.
+ type: str
+ sample: "Pending"
+ verification_token:
+ description: The verification token for a domain identity.
+ type: str
+notification_attributes:
+ description: The notification setup for the identity.
+ returned: success
+ type: complex
+ sample: {
+ "bounce_topic": "arn:aws:sns:....",
+ "complaint_topic": "arn:aws:sns:....",
+ "delivery_topic": "arn:aws:sns:....",
+ "forwarding_enabled": false,
+ "headers_in_bounce_notifications_enabled": true,
+ "headers_in_complaint_notifications_enabled": true,
+ "headers_in_delivery_notifications_enabled": true
+ }
+ contains:
+ bounce_topic:
+ description:
+ - The ARN of the topic bounce notifications are delivered to.
+ - Omitted if bounce notifications are not delivered to a topic.
+ type: str
+ complaint_topic:
+ description:
+ - The ARN of the topic complaint notifications are delivered to.
+ - Omitted if complaint notifications are not delivered to a topic.
+ type: str
+ delivery_topic:
+ description:
+ - The ARN of the topic delivery notifications are delivered to.
+ - Omitted if delivery notifications are not delivered to a topic.
+ type: str
+ forwarding_enabled:
+ description: Whether or not feedback forwarding is enabled.
+ type: bool
+ headers_in_bounce_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the bounce topic.
+ type: bool
+ headers_in_complaint_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the complaint topic.
+ type: bool
+ headers_in_delivery_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the delivery topic.
+ type: bool
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+import time
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
+    # Unpredictably, get_identity_verification_attributes doesn't include the identity even when we've
+    # just registered it. Suspect this is an eventual consistency issue on the AWS side.
+    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
+ # a consistent return from the module.
+ # To avoid this we have an internal retry that we use only after registering the identity.
+ for attempt in range(0, retries + 1):
+ try:
+ response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
+ identity_verification = response['VerificationAttributes']
+ if identity in identity_verification:
+ break
+ time.sleep(retryDelay)
+ if identity not in identity_verification:
+ return None
+ return identity_verification[identity]
+
+
+def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
+    # Unpredictably, get_identity_notifications doesn't include the notifications when we've
+    # just registered the identity.
+    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
+ # a consistent return from the module.
+ # To avoid this we have an internal retry that we use only when getting the current notification
+ # status for return.
+ for attempt in range(0, retries + 1):
+ try:
+ response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
+ notification_attributes = response['NotificationAttributes']
+
+        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
+        # in the notification attributes when the identity is first registered. Suspect that this is caused by
+ # eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
+ #
+ # When this occurs, just return None and we'll assume no identity notification settings have been changed
+ # from the default which is reasonable if this is just eventual consistency on creation.
+ # See: https://github.com/ansible/ansible/issues/36065
+ if identity in notification_attributes:
+ break
+ else:
+ # Paranoia check for coding errors, we only requested one identity, so if we get a different one
+ # something has gone very wrong.
+ if len(notification_attributes) != 0:
+ module.fail_json(
+ msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
+ identity,
+ notification_attributes.keys(),
+ )
+ )
+ time.sleep(retryDelay)
+ if identity not in notification_attributes:
+ return None
+ return notification_attributes[identity]
+
+
+def desired_topic(module, notification_type):
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ if arg_dict:
+ return arg_dict.get('topic', None)
+ else:
+ return None
+
+
+def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
+ topic_key = notification_type + 'Topic'
+ if identity_notifications is None:
+        # If there is no notification configuration at all, notifications cannot currently be
+        # sent to topics, hence assume None as the current state.
+ current = None
+ elif topic_key in identity_notifications:
+ current = identity_notifications[topic_key]
+ else:
+ # If there is information on the notifications setup but no information on the
+ # particular notification topic it's pretty safe to assume there's no topic for
+ # this notification. AWS API docs suggest this information will always be
+ # included but best to be defensive
+ current = None
+
+ required = desired_topic(module, notification_type)
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
+ identity=identity,
+ notification_type=notification_type,
+ ))
+ return True
+ return False
+
+
+def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+ if identity_notifications is None:
+ # If there is no configuration for topic notifications, headers cannot be being
+ # forwarded, hence assume false.
+ current = False
+ elif header_key in identity_notifications:
+ current = identity_notifications[header_key]
+ else:
+        # The AWS API docs indicate that the 'HeadersIn...' fields are optional. Unfortunately
+        # it's not clear what this means, but it's a pretty safe assumption that absence means
+        # headers are not included, since most API consumers would interpret absence as false.
+ current = False
+
+ if arg_dict is not None and 'include_headers' in arg_dict:
+ required = arg_dict['include_headers']
+ else:
+ required = False
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
+ aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
+ identity=identity,
+ notification_type=notification_type,
+ ))
+ return True
+ return False
+
+
+def update_feedback_forwarding(connection, module, identity, identity_notifications):
+ if identity_notifications is None:
+ # AWS requires feedback forwarding to be enabled unless bounces and complaints
+        # are being handled by SNS topics. So in the absence of any identity_notifications
+        # information, feedback forwarding must currently be on.
+ current = True
+ elif 'ForwardingEnabled' in identity_notifications:
+ current = identity_notifications['ForwardingEnabled']
+ else:
+ # If there is information on the notifications setup but no information on the
+ # forwarding state it's pretty safe to assume forwarding is off. AWS API docs
+ # suggest this information will always be included but best to be defensive
+ current = False
+
+ required = module.params.get('feedback_forwarding')
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
+ return True
+ return False
+
+
+def create_mock_notifications_response(module):
+ resp = {
+ "ForwardingEnabled": module.params.get('feedback_forwarding'),
+ }
+ for notification_type in ('Bounce', 'Complaint', 'Delivery'):
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ if arg_dict is not None and 'topic' in arg_dict:
+ resp[notification_type + 'Topic'] = arg_dict['topic']
+
+ header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+ if arg_dict is not None and 'include_headers' in arg_dict:
+ resp[header_key] = arg_dict['include_headers']
+ else:
+ resp[header_key] = False
+ return resp
+
+
+def update_identity_notifications(connection, module):
+ identity = module.params.get('identity')
+ changed = False
+ identity_notifications = get_identity_notifications(connection, module, identity)
+
+ for notification_type in ('Bounce', 'Complaint', 'Delivery'):
+ changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
+ changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
+
+ changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
+
+ if changed or identity_notifications is None:
+ if module.check_mode:
+ identity_notifications = create_mock_notifications_response(module)
+ else:
+ identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
+ return changed, identity_notifications
+
+
+def validate_params_for_identity_present(module):
+ if module.params.get('feedback_forwarding') is False:
+ if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
+ module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
+ "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
+
+
+def create_or_update_identity(connection, module, region, account_id):
+ identity = module.params.get('identity')
+ changed = False
+ verification_attributes = get_verification_attributes(connection, module, identity)
+ if verification_attributes is None:
+ try:
+ if not module.check_mode:
+ if '@' in identity:
+ connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
+ else:
+ connection.verify_domain_identity(Domain=identity, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
+ if module.check_mode:
+ verification_attributes = {
+ "VerificationStatus": "Pending",
+ }
+ else:
+ verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
+ changed = True
+ elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
+ module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
+ verification_attributes=camel_dict_to_snake_dict(verification_attributes))
+
+ if verification_attributes is None:
+ module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
+
+ notifications_changed, notification_attributes = update_identity_notifications(connection, module)
+ changed |= notifications_changed
+
+ if notification_attributes is None:
+ module.fail_json(msg='Unable to load identity notification attributes.')
+
+ identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
+
+ module.exit_json(
+ changed=changed,
+ identity=identity,
+ identity_arn=identity_arn,
+ verification_attributes=camel_dict_to_snake_dict(verification_attributes),
+ notification_attributes=camel_dict_to_snake_dict(notification_attributes),
+ )
+
+
+def destroy_identity(connection, module):
+ identity = module.params.get('identity')
+ changed = False
+ verification_attributes = get_verification_attributes(connection, module, identity)
+ if verification_attributes is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_identity(Identity=identity, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
+ changed = True
+
+ module.exit_json(
+ changed=changed,
+ identity=identity,
+ )
+
+
+def get_account_id(module):
+ sts = module.client('sts')
+ try:
+ caller_identity = sts.get_caller_identity()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve caller identity')
+ return caller_identity['Account']
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ "identity": dict(required=True, type='str'),
+ "state": dict(default='present', choices=['present', 'absent']),
+ "bounce_notifications": dict(type='dict'),
+ "complaint_notifications": dict(type='dict'),
+ "delivery_notifications": dict(type='dict'),
+ "feedback_forwarding": dict(default=True, type='bool'),
+ },
+ supports_check_mode=True,
+ )
+
+ for notification_type in ('bounce', 'complaint', 'delivery'):
+ param_name = notification_type + '_notifications'
+ arg_dict = module.params.get(param_name)
+ if arg_dict:
+ extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
+ if extra_keys:
+ module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
+
+ # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+ # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to SES calls.
+ connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ region = module.region
+ account_id = get_account_id(module)
+ validate_params_for_identity_present(module)
+ create_or_update_identity(connection, module, region, account_id)
+ else:
+ destroy_identity(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity_policy.py
new file mode 100644
index 00000000..bb743c6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_identity_policy.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_ses_identity_policy
+version_added: 1.0.0
+short_description: Manages SES sending authorization policies
+description:
+ - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
+ - SES authorization sending policies can be used to control what actors are able to send email
+ on behalf of the validated identity and what conditions must be met by the sent emails.
+author: Ed Costello (@orthanc)
+
+options:
+ identity:
+ description: |
+ The SES identity to attach or remove a policy from. This can be either the full ARN or just
+ the verified email or domain.
+ required: true
+ type: str
+ policy_name:
+ description: The name used to identify the policy within the scope of the identity it's attached to.
+ required: true
+ type: str
+ policy:
+ description: A properly formatted JSON sending authorization policy. Required when I(state=present).
+ type: json
+ state:
+    description: Whether to create (or update) or delete the authorization policy on the identity.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+requirements: [ 'botocore', 'boto3' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: add sending authorization policy to domain identity
+ community.aws.aws_ses_identity_policy:
+ identity: example.com
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: add sending authorization policy to email identity
+ community.aws.aws_ses_identity_policy:
+ identity: example@example.com
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: add sending authorization policy to identity using ARN
+ community.aws.aws_ses_identity_policy:
+ identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: remove sending authorization policy
+ community.aws.aws_ses_identity_policy:
+ identity: example.com
+ policy_name: ExamplePolicy
+ state: absent
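+
+# A hedged sketch with an inline policy instead of a template lookup; the
+# account ID and ARNs are placeholders
+- name: add inline sending authorization policy
+  community.aws.aws_ses_identity_policy:
+    identity: example.com
+    policy_name: ExampleInlinePolicy
+    policy:
+      Version: '2012-10-17'
+      Statement:
+        - Effect: Allow
+          Principal:
+            AWS: 'arn:aws:iam::123456789012:root'
+          Action:
+            - 'ses:SendEmail'
+            - 'ses:SendRawEmail'
+          Resource: 'arn:aws:ses:us-east-1:123456789012:identity/example.com'
+    state: present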
+'''
+
+RETURN = '''
+policies:
+ description: A list of all policies present on the identity after the operation.
+ returned: success
+ type: list
+ sample: [ExamplePolicy]
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry
+
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_identity_policy(connection, module, identity, policy_name):
+ try:
+ response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
+ policies = response['Policies']
+ if policy_name in policies:
+ return policies[policy_name]
+ return None
+
+
+def create_or_update_identity_policy(connection, module):
+ identity = module.params.get('identity')
+ policy_name = module.params.get('policy_name')
+ required_policy = module.params.get('policy')
+ required_policy_dict = json.loads(required_policy)
+
+ changed = False
+ policy = get_identity_policy(connection, module, identity, policy_name)
+ policy_dict = json.loads(policy) if policy else None
+ if compare_policies(policy_dict, required_policy_dict):
+ changed = True
+ try:
+ if not module.check_mode:
+ connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
+
+ # Load the list of applied policies to include in the response.
+ # In principle we should be able to just return the response, but given
+ # eventual consistency behaviours in AWS it's plausible that we could
+ # end up with a list that doesn't contain the policy we just added.
+ # So out of paranoia check for this case and if we're missing the policy
+ # just make sure it's present.
+ #
+ # As a nice side benefit this also means the return is correct in check mode
+ try:
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to list identity policies')
+ if policy_name is not None and policy_name not in policies_present:
+ policies_present = list(policies_present)
+ policies_present.append(policy_name)
+ module.exit_json(
+ changed=changed,
+ policies=policies_present,
+ )
+
+
+def delete_identity_policy(connection, module):
+ identity = module.params.get('identity')
+ policy_name = module.params.get('policy_name')
+
+ changed = False
+ try:
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to list identity policies')
+ if policy_name in policies_present:
+ try:
+ if not module.check_mode:
+ connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
+ changed = True
+ policies_present = list(policies_present)
+ policies_present.remove(policy_name)
+
+ module.exit_json(
+ changed=changed,
+ policies=policies_present,
+ )
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'identity': dict(required=True, type='str'),
+ 'state': dict(default='present', choices=['present', 'absent']),
+ 'policy_name': dict(required=True, type='str'),
+ 'policy': dict(type='json', default=None),
+ },
+ required_if=[['state', 'present', ['policy']]],
+ supports_check_mode=True,
+ )
+
+ # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+ # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to SES calls.
+ connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_identity_policy(connection, module)
+ else:
+ delete_identity_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_rule_set.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_rule_set.py
new file mode 100644
index 00000000..0996497c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ses_rule_set.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_ses_rule_set
+version_added: 1.0.0
+short_description: Manages SES inbound receipt rule sets
+description:
+ - The M(community.aws.aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets
+author:
+ - "Ben Tomasik (@tomislacker)"
+ - "Ed Costello (@orthanc)"
+requirements: [ boto3, botocore ]
+options:
+ name:
+ description:
+ - The name of the receipt rule set.
+ required: True
+ type: str
+ state:
+ description:
+ - Whether to create (or update) or destroy the receipt rule set.
+ required: False
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ active:
+ description:
+ - Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
+ - If omitted, the active rule set will not be changed.
+ - If C(True) then this rule set will be made active and all others inactive.
+      - If C(False) then this rule set will be deactivated. Be careful with this as you can end up with no active rule set.
+ type: bool
+ required: False
+ force:
+ description:
+ - When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
+ type: bool
+ required: False
+ default: False
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+---
+- name: Create default rule set and activate it if not already
+ community.aws.aws_ses_rule_set:
+ name: default-rule-set
+ state: present
+ active: yes
+
+- name: Create some arbitrary rule set but do not activate it
+ community.aws.aws_ses_rule_set:
+ name: arbitrary-rule-set
+ state: present
+
+- name: Explicitly deactivate the default rule set leaving no active rule set
+ community.aws.aws_ses_rule_set:
+ name: default-rule-set
+ state: present
+ active: no
+
+- name: Remove an arbitrary inactive rule set
+ community.aws.aws_ses_rule_set:
+ name: arbitrary-rule-set
+ state: absent
+
+- name: Remove a rule set even if we have to first deactivate it to remove it
+ community.aws.aws_ses_rule_set:
+ name: default-rule-set
+ state: absent
+ force: yes
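+
+# A minimal follow-up sketch (not part of the original examples): register the
+# result and show the names of the rule sets that exist afterwards
+- name: Create a rule set and capture the result
+  community.aws.aws_ses_rule_set:
+    name: default-rule-set
+    state: present
+  register: ruleset_result
+
+- name: Show rule set names
+  ansible.builtin.debug:
+    msg: "{{ ruleset_result.rule_sets | map(attribute='name') | list }}"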
+"""
+
+RETURN = """
+active:
+  description: Whether the SES rule set is active.
+ returned: success if I(state) is C(present)
+ type: bool
+ sample: true
+rule_sets:
+ description: The list of SES receipt rule sets that exist after any changes.
+ returned: success
+ type: list
+ sample: [{
+ "created_timestamp": "2018-02-25T01:20:32.690000+00:00",
+ "name": "default-rule-set"
+ }]
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def list_rule_sets(client, module):
+ try:
+ response = client.list_receipt_rule_sets(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't list rule sets.")
+ return response['RuleSets']
+
+
+def rule_set_in(name, rule_sets):
+    return any(s['Name'] == name for s in rule_sets)
+
+
+def ruleset_active(client, module, name):
+ try:
+ active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get the active rule set.")
+ if active_rule_set is not None and 'Metadata' in active_rule_set:
+ return name == active_rule_set['Metadata']['Name']
+ else:
+ # Metadata was not set meaning there is no active rule set
+ return False
+
+
+def deactivate_rule_set(client, module):
+ try:
+ # No ruleset name deactivates all rulesets
+ client.set_active_receipt_rule_set(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't set active rule set to None.")
+
+
+def update_active_rule_set(client, module, name, desired_active):
+ check_mode = module.check_mode
+
+ active = ruleset_active(client, module, name)
+
+ changed = False
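+    # active=None means the option was omitted; leave the currently active rule set unchanged.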
+ if desired_active is not None:
+ if desired_active and not active:
+ if not check_mode:
+ try:
+ client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
+ changed = True
+ active = True
+ elif not desired_active and active:
+ if not check_mode:
+ deactivate_rule_set(client, module)
+ changed = True
+ active = False
+ return changed, active
+
+
+def create_or_update_rule_set(client, module):
+ name = module.params.get('name')
+ check_mode = module.check_mode
+ changed = False
+
+ rule_sets = list_rule_sets(client, module)
+ if not rule_set_in(name, rule_sets):
+ if not check_mode:
+ try:
+ client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
+ changed = True
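+        # Reflect the creation locally (including in check mode) so the returned rule_sets list contains the new set.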
+ rule_sets = list(rule_sets)
+ rule_sets.append({
+ 'Name': name,
+ })
+
+ (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active'))
+ changed |= active_changed
+
+ module.exit_json(
+ changed=changed,
+ active=active,
+ rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
+ )
+
+
+def remove_rule_set(client, module):
+ name = module.params.get('name')
+ check_mode = module.check_mode
+ changed = False
+
+ rule_sets = list_rule_sets(client, module)
+ if rule_set_in(name, rule_sets):
+ active = ruleset_active(client, module, name)
+ if active and not module.params.get('force'):
+ module.fail_json(
+ msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
+ error={
+ "code": "CannotDelete",
+ "message": "Cannot delete active rule set: {0}".format(name),
+ }
+ )
+ if not check_mode:
+ if active and module.params.get('force'):
+ deactivate_rule_set(client, module)
+ try:
+ client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))
+ changed = True
+ rule_sets = [x for x in rule_sets if x['Name'] != name]
+
+ module.exit_json(
+ changed=changed,
+ rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ active=dict(type='bool'),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ state = module.params.get('state')
+
+    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the Ansible build runs multiple instances of the tests in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to SES calls.
+ client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ if state == 'absent':
+ remove_rule_set(client, module)
+ else:
+ create_or_update_rule_set(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_facts.py
new file mode 100644
index 00000000..adf7dde8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_facts.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module is sponsored by E.T.A.I. (www.etai.fr)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_sgw_info
+version_added: 1.0.0
+short_description: Fetch AWS Storage Gateway information
+description:
+ - Fetch AWS Storage Gateway information
+ - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+options:
+ gather_local_disks:
+ description:
+ - Gather local disks attached to the storage gateway.
+ type: bool
+ required: false
+ default: true
+ gather_tapes:
+ description:
+ - Gather tape information for storage gateways in tape mode.
+ type: bool
+ required: false
+ default: true
+ gather_file_shares:
+ description:
+ - Gather file share information for storage gateways in s3 mode.
+ type: bool
+ required: false
+ default: true
+ gather_volumes:
+ description:
+ - Gather volume information for storage gateways in iSCSI (cached & stored) modes.
+ type: bool
+ required: false
+ default: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+RETURN = '''
+gateways:
+ description: list of gateway objects
+ returned: always
+ type: complex
+ contains:
+ gateway_arn:
+ description: "Storage Gateway ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888"
+ gateway_id:
+ description: "Storage Gateway ID"
+ returned: always
+ type: str
+ sample: "sgw-9999F888"
+ gateway_name:
+ description: "Storage Gateway friendly name"
+ returned: always
+ type: str
+ sample: "my-sgw-01"
+ gateway_operational_state:
+ description: "Storage Gateway operational state"
+ returned: always
+ type: str
+ sample: "ACTIVE"
+ gateway_type:
+ description: "Storage Gateway type"
+ returned: always
+ type: str
+ sample: "FILE_S3"
+ file_shares:
+ description: "Storage gateway file shares"
+ returned: when gateway_type == "FILE_S3"
+ type: complex
+ contains:
+ file_share_arn:
+ description: "File share ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88"
+ file_share_id:
+ description: "File share ID"
+ returned: always
+ type: str
+ sample: "share-AF999C88"
+ file_share_status:
+ description: "File share status"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ tapes:
+ description: "Storage Gateway tapes"
+ returned: when gateway_type == "VTL"
+ type: complex
+ contains:
+ tape_arn:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88"
+ tape_barcode:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "tape-AF999C88"
+ tape_size_in_bytes:
+ description: "Tape ARN"
+ returned: always
+ type: int
+ sample: 555887569
+ tape_status:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ local_disks:
+ description: "Storage gateway local disks"
+ returned: always
+ type: complex
+ contains:
+ disk_allocation_type:
+ description: "Disk allocation type"
+ returned: always
+ type: str
+ sample: "CACHE STORAGE"
+ disk_id:
+ description: "Disk ID on the system"
+ returned: always
+ type: str
+ sample: "pci-0000:00:1f.0"
+ disk_node:
+ description: "Disk parent block device"
+ returned: always
+ type: str
+ sample: "/dev/sdb"
+ disk_path:
+ description: "Disk path used for the cache"
+ returned: always
+ type: str
+ sample: "/dev/nvme1n1"
+ disk_size_in_bytes:
+ description: "Disk size in bytes"
+ returned: always
+ type: int
+ sample: 107374182400
+ disk_status:
+ description: "Disk status"
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: "Get AWS storage gateway information"
+ community.aws.aws_sgw_info:
+
+- name: "Get AWS storage gateway information for region eu-west-3"
+ community.aws.aws_sgw_info:
+ region: eu-west-3
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+class SGWInformationManager(object):
+ def __init__(self, client, module):
+ self.client = client
+ self.module = module
+ self.name = self.module.params.get('name')
+
+ def fetch(self):
+ gateways = self.list_gateways()
+ for gateway in gateways:
+ if self.module.params.get('gather_local_disks'):
+ self.list_local_disks(gateway)
+ # File share gateway
+ if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
+ self.list_gateway_file_shares(gateway)
+ # Volume tape gateway
+ elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
+ self.list_gateway_vtl(gateway)
+ # iSCSI gateway
+ elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
+ self.list_gateway_volumes(gateway)
+
+ self.module.exit_json(gateways=gateways)
+
+ """
+ List all storage gateways for the AWS endpoint.
+ """
+ def list_gateways(self):
+ try:
+ paginator = self.client.get_paginator('list_gateways')
+ response = paginator.paginate(
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateways = []
+ for gw in response["Gateways"]:
+ gateways.append(camel_dict_to_snake_dict(gw))
+
+ return gateways
+
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
+
+ """
+ Read file share objects from AWS API response.
+    Drop the gateway_arn attribute from the response, as it duplicates the parent object.
+ """
+ @staticmethod
+    def _read_gateway_fileshare_response(fileshares, aws_response):
+        for share in aws_response["FileShareInfoList"]:
+ share_obj = camel_dict_to_snake_dict(share)
+ if "gateway_arn" in share_obj:
+ del share_obj["gateway_arn"]
+ fileshares.append(share_obj)
+
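+        # NextMarker is only present when more file shares remain; returning None ends the caller's pagination loop.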
+        return aws_response["NextMarker"] if "NextMarker" in aws_response else None
+
+ """
+ List file shares attached to AWS storage gateway when in S3 mode.
+ """
+ def list_gateway_file_shares(self, gateway):
+ try:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Limit=100
+ )
+
+ gateway["file_shares"] = []
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+
+ while marker is not None:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
+
+ """
+ List storage gateway local disks
+ """
+ def list_local_disks(self, gateway):
+ try:
+ gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
+ self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
+
+ """
+ Read tape objects from AWS API response.
+    Drop the gateway_arn attribute from the response, as it duplicates the parent object.
+ """
+ @staticmethod
+ def _read_gateway_tape_response(tapes, aws_response):
+ for tape in aws_response["TapeInfos"]:
+ tape_obj = camel_dict_to_snake_dict(tape)
+ if "gateway_arn" in tape_obj:
+ del tape_obj["gateway_arn"]
+ tapes.append(tape_obj)
+
+ return aws_response["Marker"] if "Marker" in aws_response else None
+
+ """
+    List tapes in the VTL (virtual tape library) and VTS (virtual tape shelf) attached to an AWS storage gateway in VTL mode.
+ """
+ def list_gateway_vtl(self, gateway):
+ try:
+ response = self.client.list_tapes(
+ Limit=100
+ )
+
+ gateway["tapes"] = []
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+
+ while marker is not None:
+ response = self.client.list_tapes(
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
+
+ """
+    List volumes attached to an AWS storage gateway in CACHED or STORED mode.
+ """
+ def list_gateway_volumes(self, gateway):
+ try:
+ paginator = self.client.get_paginator('list_volumes')
+ response = paginator.paginate(
+ GatewayARN=gateway["gateway_arn"],
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateway["volumes"] = []
+ for volume in response["VolumeInfos"]:
+ volume_obj = camel_dict_to_snake_dict(volume)
+ if "gateway_arn" in volume_obj:
+ del volume_obj["gateway_arn"]
+ if "gateway_id" in volume_obj:
+ del volume_obj["gateway_id"]
+
+ gateway["volumes"].append(volume_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
+
+
+def main():
+ argument_spec = dict(
+ gather_local_disks=dict(type='bool', default=True),
+ gather_tapes=dict(type='bool', default=True),
+ gather_file_shares=dict(type='bool', default=True),
+ gather_volumes=dict(type='bool', default=True)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
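+    # module._name is the name the module was invoked by, so calls via the old alias get a deprecation warning.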
+ if module._name == 'aws_sgw_facts':
+ module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", date='2021-12-01', collection_name='community.aws')
+ client = module.client('storagegateway')
+
+ if client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.')
+
+ SGWInformationManager(client, module).fetch()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_info.py
new file mode 100644
index 00000000..adf7dde8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_sgw_info.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module is sponsored by E.T.A.I. (www.etai.fr)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_sgw_info
+version_added: 1.0.0
+short_description: Fetch AWS Storage Gateway information
+description:
+ - Fetch AWS Storage Gateway information
+ - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+options:
+ gather_local_disks:
+ description:
+ - Gather local disks attached to the storage gateway.
+ type: bool
+ required: false
+ default: true
+ gather_tapes:
+ description:
+ - Gather tape information for storage gateways in tape mode.
+ type: bool
+ required: false
+ default: true
+ gather_file_shares:
+ description:
+ - Gather file share information for storage gateways in s3 mode.
+ type: bool
+ required: false
+ default: true
+ gather_volumes:
+ description:
+ - Gather volume information for storage gateways in iSCSI (cached & stored) modes.
+ type: bool
+ required: false
+ default: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+RETURN = '''
+gateways:
+ description: list of gateway objects
+ returned: always
+ type: complex
+ contains:
+ gateway_arn:
+ description: "Storage Gateway ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888"
+ gateway_id:
+ description: "Storage Gateway ID"
+ returned: always
+ type: str
+ sample: "sgw-9999F888"
+ gateway_name:
+ description: "Storage Gateway friendly name"
+ returned: always
+ type: str
+ sample: "my-sgw-01"
+ gateway_operational_state:
+ description: "Storage Gateway operational state"
+ returned: always
+ type: str
+ sample: "ACTIVE"
+ gateway_type:
+ description: "Storage Gateway type"
+ returned: always
+ type: str
+ sample: "FILE_S3"
+ file_shares:
+ description: "Storage gateway file shares"
+ returned: when gateway_type == "FILE_S3"
+ type: complex
+ contains:
+ file_share_arn:
+ description: "File share ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88"
+ file_share_id:
+ description: "File share ID"
+ returned: always
+ type: str
+ sample: "share-AF999C88"
+ file_share_status:
+ description: "File share status"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ tapes:
+ description: "Storage Gateway tapes"
+ returned: when gateway_type == "VTL"
+ type: complex
+ contains:
+ tape_arn:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88"
+ tape_barcode:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "tape-AF999C88"
+ tape_size_in_bytes:
+ description: "Tape ARN"
+ returned: always
+ type: int
+ sample: 555887569
+ tape_status:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ local_disks:
+ description: "Storage gateway local disks"
+ returned: always
+ type: complex
+ contains:
+ disk_allocation_type:
+ description: "Disk allocation type"
+ returned: always
+ type: str
+ sample: "CACHE STORAGE"
+ disk_id:
+ description: "Disk ID on the system"
+ returned: always
+ type: str
+ sample: "pci-0000:00:1f.0"
+ disk_node:
+ description: "Disk parent block device"
+ returned: always
+ type: str
+ sample: "/dev/sdb"
+ disk_path:
+ description: "Disk path used for the cache"
+ returned: always
+ type: str
+ sample: "/dev/nvme1n1"
+ disk_size_in_bytes:
+ description: "Disk size in bytes"
+ returned: always
+ type: int
+ sample: 107374182400
+ disk_status:
+ description: "Disk status"
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: "Get AWS storage gateway information"
+ community.aws.aws_sgw_info:
+
+- name: "Get AWS storage gateway information for region eu-west-3"
+ community.aws.aws_sgw_info:
+ region: eu-west-3
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+class SGWInformationManager(object):
+ def __init__(self, client, module):
+ self.client = client
+ self.module = module
+ self.name = self.module.params.get('name')
+
+ def fetch(self):
+ gateways = self.list_gateways()
+ for gateway in gateways:
+ if self.module.params.get('gather_local_disks'):
+ self.list_local_disks(gateway)
+ # File share gateway
+ if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
+ self.list_gateway_file_shares(gateway)
+ # Volume tape gateway
+ elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
+ self.list_gateway_vtl(gateway)
+ # iSCSI gateway
+ elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
+ self.list_gateway_volumes(gateway)
+
+ self.module.exit_json(gateways=gateways)
+
+ """
+ List all storage gateways for the AWS endpoint.
+ """
+ def list_gateways(self):
+ try:
+ paginator = self.client.get_paginator('list_gateways')
+ response = paginator.paginate(
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateways = []
+ for gw in response["Gateways"]:
+ gateways.append(camel_dict_to_snake_dict(gw))
+
+ return gateways
+
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
+
+ """
+ Read file share objects from AWS API response.
+    Drop the gateway_arn attribute from the response, as it duplicates the parent object.
+ """
+ @staticmethod
+    def _read_gateway_fileshare_response(fileshares, aws_response):
+        for share in aws_response["FileShareInfoList"]:
+ share_obj = camel_dict_to_snake_dict(share)
+ if "gateway_arn" in share_obj:
+ del share_obj["gateway_arn"]
+ fileshares.append(share_obj)
+
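+        # NextMarker is only present when more file shares remain; returning None ends the caller's pagination loop.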
+        return aws_response["NextMarker"] if "NextMarker" in aws_response else None
+
+ """
+ List file shares attached to AWS storage gateway when in S3 mode.
+ """
+ def list_gateway_file_shares(self, gateway):
+ try:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Limit=100
+ )
+
+ gateway["file_shares"] = []
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+
+ while marker is not None:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
+
+ """
+ List storage gateway local disks
+ """
+ def list_local_disks(self, gateway):
+ try:
+ gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
+ self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
+
+ """
+ Read tape objects from AWS API response.
+    Drop the gateway_arn attribute from the response, as it duplicates the parent object.
+ """
+ @staticmethod
+ def _read_gateway_tape_response(tapes, aws_response):
+ for tape in aws_response["TapeInfos"]:
+ tape_obj = camel_dict_to_snake_dict(tape)
+ if "gateway_arn" in tape_obj:
+ del tape_obj["gateway_arn"]
+ tapes.append(tape_obj)
+
+ return aws_response["Marker"] if "Marker" in aws_response else None
+
+ """
+    List tapes in the VTL (virtual tape library) and VTS (virtual tape shelf) attached to an AWS storage gateway in VTL mode.
+ """
+ def list_gateway_vtl(self, gateway):
+ try:
+ response = self.client.list_tapes(
+ Limit=100
+ )
+
+ gateway["tapes"] = []
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+
+ while marker is not None:
+ response = self.client.list_tapes(
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
+
+ """
+    List volumes attached to an AWS storage gateway in CACHED or STORED mode.
+ """
+ def list_gateway_volumes(self, gateway):
+ try:
+ paginator = self.client.get_paginator('list_volumes')
+ response = paginator.paginate(
+ GatewayARN=gateway["gateway_arn"],
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateway["volumes"] = []
+ for volume in response["VolumeInfos"]:
+ volume_obj = camel_dict_to_snake_dict(volume)
+ if "gateway_arn" in volume_obj:
+ del volume_obj["gateway_arn"]
+ if "gateway_id" in volume_obj:
+ del volume_obj["gateway_id"]
+
+ gateway["volumes"].append(volume_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
+
+
+def main():
+ argument_spec = dict(
+ gather_local_disks=dict(type='bool', default=True),
+ gather_tapes=dict(type='bool', default=True),
+ gather_file_shares=dict(type='bool', default=True),
+ gather_volumes=dict(type='bool', default=True)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
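+    # module._name is the name the module was invoked by, so calls via the old alias get a deprecation warning.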
+ if module._name == 'aws_sgw_facts':
+ module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", date='2021-12-01', collection_name='community.aws')
+ client = module.client('storagegateway')
+
+ if client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.')
+
+ SGWInformationManager(client, module).fetch()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py
new file mode 100644
index 00000000..32c1df62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_ssm_parameter_store
+version_added: 1.0.0
+short_description: Manage key-value pairs in AWS SSM Parameter Store.
+description:
+  - Manage key-value pairs in AWS SSM Parameter Store.
+options:
+ name:
+ description:
+ - Parameter key name.
+ required: true
+ type: str
+ description:
+ description:
+ - Parameter key description.
+ required: false
+ type: str
+ value:
+ description:
+ - Parameter value.
+ required: false
+ type: str
+ state:
+ description:
+      - Creates or modifies an existing parameter when C(present).
+      - Deletes a parameter when C(absent).
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ string_type:
+ description:
+ - Parameter String type.
+ required: false
+ choices: ['String', 'StringList', 'SecureString']
+ default: String
+ type: str
+ decryption:
+ description:
+      - Whether to decrypt C(SecureString) values to get the plain text secret.
+ type: bool
+ required: false
+ default: true
+ key_id:
+ description:
+ - AWS KMS key to decrypt the secrets.
+ - The default key (C(alias/aws/ssm)) is automatically generated the first
+ time it's requested.
+ required: false
+ default: alias/aws/ssm
+ type: str
+ overwrite_value:
+ description:
+ - Option to overwrite an existing value if it already exists.
+ required: false
+ choices: ['never', 'changed', 'always']
+ default: changed
+ type: str
+author:
+ - Nathan Webster (@nathanwebsterdotme)
+ - Bill Wang (@ozbillwang) <ozbillwang@gmail.com>
+ - Michael De La Rue (@mikedlr)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ botocore, boto3 ]
+'''
+
+EXAMPLES = '''
+- name: Create or update key/value pair in aws parameter store
+ community.aws.aws_ssm_parameter_store:
+ name: "Hello"
+ description: "This is your first key"
+ value: "World"
+
+- name: Delete the key
+ community.aws.aws_ssm_parameter_store:
+ name: "Hello"
+ state: absent
+
+- name: Create or update secure key/value pair with default kms key (aws/ssm)
+ community.aws.aws_ssm_parameter_store:
+ name: "Hello"
+ description: "This is your first key"
+ string_type: "SecureString"
+ value: "World"
+
+- name: Create or update secure key/value pair with nominated kms key
+ community.aws.aws_ssm_parameter_store:
+ name: "Hello"
+ description: "This is your first key"
+ string_type: "SecureString"
+ key_id: "alias/demo"
+ value: "World"
+
+- name: Always update a parameter store value and create a new version
+ community.aws.aws_ssm_parameter_store:
+ name: "overwrite_example"
+ description: "This example will always overwrite the value"
+ string_type: "String"
+ value: "Test1234"
+ overwrite_value: "always"
+
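+# overwrite_value=never will leave an existing parameter completely untouched
+- name: Create a parameter only if it does not already exist
+  community.aws.aws_ssm_parameter_store:
+    name: "create_once_example"
+    description: "This example will never overwrite an existing value"
+    string_type: "String"
+    value: "Test1234"
+    overwrite_value: "never"
+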
+- name: Recommended way to read a parameter, using the aws_ssm lookup plugin
+ ansible.builtin.debug:
+ msg: "{{ lookup('amazon.aws.aws_ssm', 'hello') }}"
+'''
+
+RETURN = '''
+put_parameter:
+ description: Add one or more parameters to the system.
+ returned: success
+ type: dict
+delete_parameter:
+ description: Delete a parameter from the system.
+ returned: success
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def update_parameter(client, module, args):
+ changed = False
+ response = {}
+
+ try:
+ response = client.put_parameter(**args)
+ changed = True
+ except ClientError as e:
+ module.fail_json_aws(e, msg="setting parameter")
+
+ return changed, response
+
+
+def create_update_parameter(client, module):
+ changed = False
+ existing_parameter = None
+ response = {}
+
+ args = dict(
+ Name=module.params.get('name'),
+ Value=module.params.get('value'),
+ Type=module.params.get('string_type')
+ )
+
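+    # Map overwrite_value to boto3's Overwrite flag; with 'never', the existence check below leaves an existing parameter untouched.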
+ if (module.params.get('overwrite_value') in ("always", "changed")):
+ args.update(Overwrite=True)
+ else:
+ args.update(Overwrite=False)
+
+ if module.params.get('description'):
+ args.update(Description=module.params.get('description'))
+
+ if module.params.get('string_type') == 'SecureString':
+ args.update(KeyId=module.params.get('key_id'))
+
+ try:
+ existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
+ except Exception:
+ pass
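+        # get_parameter raises (typically ParameterNotFound) when the parameter does not exist; existing_parameter stays None.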
+
+ if existing_parameter:
+ if (module.params.get('overwrite_value') == 'always'):
+
+ (changed, response) = update_parameter(client, module, args)
+
+ elif (module.params.get('overwrite_value') == 'changed'):
+ if existing_parameter['Parameter']['Type'] != args['Type']:
+ (changed, response) = update_parameter(client, module, args)
+
+ if existing_parameter['Parameter']['Value'] != args['Value']:
+ (changed, response) = update_parameter(client, module, args)
+
+ if args.get('Description'):
+ # Description field not available from get_parameter function so get it from describe_parameters
+ describe_existing_parameter = None
+ try:
+ describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
+ describe_existing_parameter = describe_existing_parameter_paginator.paginate(
+ Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
+
+ except ClientError as e:
+ module.fail_json_aws(e, msg="getting description value")
+
+ if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
+ (changed, response) = update_parameter(client, module, args)
+ else:
+ (changed, response) = update_parameter(client, module, args)
+
+ return changed, response
+
+
+def delete_parameter(client, module):
+ response = {}
+
+ try:
+ response = client.delete_parameter(
+ Name=module.params.get('name')
+ )
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ParameterNotFound':
+ return False, {}
+ module.fail_json_aws(e, msg="deleting parameter")
+
+ return True, response
+
+
+def setup_client(module):
+ connection = module.client('ssm')
+ return connection
+
+
+def setup_module_object():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ value=dict(required=False, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
+ decryption=dict(default=True, type='bool'),
+ key_id=dict(default="alias/aws/ssm"),
+ overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
+ )
+
+ return AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+
+def main():
+ module = setup_module_object()
+ state = module.params.get('state')
+ client = setup_client(module)
+
+ invocations = {
+ "present": create_update_parameter,
+ "absent": delete_parameter,
+ }
+ (changed, response) = invocations[state](client, module)
+ module.exit_json(changed=changed, response=response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine.py
new file mode 100644
index 00000000..5ab13baa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine
+version_added: 1.0.0
+
+short_description: Manage AWS Step Functions state machines
+
+
+description:
+ - Create, update and delete state machines in AWS Step Functions.
+ - Calling the module in C(state=present) for an existing AWS Step Functions state machine
+ will attempt to update the state machine definition, IAM Role, or tags with the provided data.
+
+options:
+ name:
+ description:
+      - Name of the state machine.
+ required: true
+ type: str
+ definition:
+ description:
+ - The Amazon States Language definition of the state machine. See
+ U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
+ information on the Amazon States Language.
+ - "This parameter is required when C(state=present)."
+ type: json
+ role_arn:
+ description:
+ - The ARN of the IAM Role that will be used by the state machine for its executions.
+ - "This parameter is required when C(state=present)."
+ type: str
+ state:
+ description:
+      - Desired state for the state machine.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
+ If the I(tags) parameter is not set then tags will not be modified.
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+author:
+ - Tom De Keyser (@tdekeyser)
+'''
+
+EXAMPLES = '''
+# Create a new AWS Step Functions state machine
+- name: Setup HelloWorld state machine
+ community.aws.aws_step_functions_state_machine:
+ name: "HelloWorldStateMachine"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
+ tags:
+ project: helloWorld
+
+# Update an existing state machine
+- name: Change IAM Role and tags of HelloWorld state machine
+ community.aws.aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
+ tags:
+ otherTag: aDifferentTag
+
+# Remove the AWS Step Functions state machine
+- name: Delete HelloWorld state machine
+ community.aws.aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ state: absent
+'''
+
+RETURN = '''
+state_machine_arn:
+ description: ARN of the AWS Step Functions state machine
+ type: str
+ returned: always
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
+ AWSRetry,
+ compare_aws_tags,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def manage_state_machine(state, sfn_client, module):
+ state_machine_arn = get_state_machine_arn(sfn_client, module)
+
+ if state == 'present':
+ if state_machine_arn is None:
+ create(sfn_client, module)
+ else:
+ update(state_machine_arn, sfn_client, module)
+ elif state == 'absent':
+ if state_machine_arn is not None:
+ remove(state_machine_arn, sfn_client, module)
+
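+    # create/update/remove exit the module themselves when something changed; reaching here means no change was needed.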
+ check_mode(module, msg='State is up-to-date.')
+ module.exit_json(changed=False)
+
+
+def create(sfn_client, module):
+ check_mode(module, msg='State machine would be created.', changed=True)
+
+ tags = module.params.get('tags')
+ sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
+
+ state_machine = sfn_client.create_state_machine(
+ name=module.params.get('name'),
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn'),
+ tags=sfn_tags
+ )
+ module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
+
+
+def remove(state_machine_arn, sfn_client, module):
+ check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def update(state_machine_arn, sfn_client, module):
+ tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
+
+ if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
+ check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.update_state_machine(
+ stateMachineArn=state_machine_arn,
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn')
+ )
+ sfn_client.untag_resource(
+ resourceArn=state_machine_arn,
+ tagKeys=tags_to_remove
+ )
+ sfn_client.tag_resource(
+ resourceArn=state_machine_arn,
+ tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
+ )
+
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def compare_tags(state_machine_arn, sfn_client, module):
+ new_tags = module.params.get('tags')
+ current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
+ return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
+
+
+def params_changed(state_machine_arn, sfn_client, module):
+ """
+ Check whether the state machine definition or IAM Role ARN is different
+ from the existing state machine parameters.
+ """
+ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
+ return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
+
+
+def get_state_machine_arn(sfn_client, module):
+ """
+ Finds the state machine ARN based on the name parameter. Returns None if
+ there is no state machine with this name.
+ """
+ target_name = module.params.get('name')
+ all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
+
+ for state_machine in all_state_machines:
+ if state_machine.get('name') == target_name:
+ return state_machine.get('stateMachineArn')
+
+
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ definition=dict(type='json'),
+ role_arn=dict(type='str'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ tags=dict(default=None, type='dict'),
+ purge_tags=dict(default=True, type='bool'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
+ state = module.params.get('state')
+
+ try:
+ manage_state_machine(state, sfn_client, module)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to manage state machine')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine_execution.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine_execution.py
new file mode 100644
index 00000000..f9e1d3fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_step_functions_state_machine_execution.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Prasad Katti (@prasadkatti)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine_execution
+version_added: 1.0.0
+
+short_description: Start or stop execution of an AWS Step Functions state machine.
+
+
+description:
+ - Start or stop execution of a state machine in AWS Step Functions.
+
+options:
+ action:
+ description: Desired action (start or stop) for a state machine execution.
+ default: start
+ choices: [ start, stop ]
+ type: str
+ name:
+ description: Name of the execution.
+ type: str
+ execution_input:
+ description: The JSON input data for the execution.
+ type: json
+ default: {}
+ state_machine_arn:
+ description: The ARN of the state machine that will be executed.
+ type: str
+ execution_arn:
+ description: The ARN of the execution you wish to stop.
+ type: str
+ cause:
+ description: A detailed explanation of the cause for stopping the execution.
+ type: str
+ default: ''
+ error:
+ description: The error code of the failure to pass in when stopping the execution.
+ type: str
+ default: ''
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+author:
+ - Prasad Katti (@prasadkatti)
+'''
+
+EXAMPLES = '''
+- name: Start an execution of a state machine
+ community.aws.aws_step_functions_state_machine_execution:
+ name: an_execution_name
+ execution_input: '{ "IsHelloWorldExample": true }'
+ state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
+
+- name: Stop an execution of a state machine
+ community.aws.aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+ cause: "cause of task failure"
+ error: "error code of the failure"
+'''
+
+RETURN = '''
+execution_arn:
+ description: ARN of the AWS Step Functions state machine execution.
+ type: str
+ returned: if action == start and changed == True
+ sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+start_date:
+ description: The date the execution is started.
+ type: str
+ returned: if action == start and changed == True
+ sample: "2019-11-02T22:39:49.071000-07:00"
+stop_date:
+ description: The date the execution is stopped.
+ type: str
+ returned: if action == stop
+ sample: "2019-11-02T22:39:49.071000-07:00"
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def start_execution(module, sfn_client):
+ '''
+    start_execution uses the execution name to determine whether a previous execution already exists.
+    If an execution with the provided name already exists, client.start_execution will not be called.
+ '''
+
+ state_machine_arn = module.params.get('state_machine_arn')
+ name = module.params.get('name')
+ execution_input = module.params.get('execution_input')
+
+ try:
+ # list_executions is eventually consistent
+ page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
+
+ for execution in page_iterators.build_full_result()['executions']:
+ if name == execution['name']:
+ check_mode(module, msg='State machine execution already exists.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be started.', changed=True)
+ res_execution = sfn_client.start_execution(
+ stateMachineArn=state_machine_arn,
+ name=name,
+ input=execution_input
+ )
+ except (ClientError, BotoCoreError) as e:
+        if isinstance(e, ClientError) and e.response['Error']['Code'] == 'ExecutionAlreadyExists':
+            # Safety net: list_executions is eventually consistent, so a just-started execution may not be visible above.
+ module.exit_json(changed=False)
+ module.fail_json_aws(e, msg="Failed to start execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
+
+
+def stop_execution(module, sfn_client):
+
+ cause = module.params.get('cause')
+ error = module.params.get('error')
+ execution_arn = module.params.get('execution_arn')
+
+ try:
+ # describe_execution is eventually consistent
+ execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
+ if execution_status != 'RUNNING':
+ check_mode(module, msg='State machine execution is not running.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be stopped.', changed=True)
+ res = sfn_client.stop_execution(
+ executionArn=execution_arn,
+ cause=cause,
+ error=error
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to stop execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
+
+
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ action=dict(choices=['start', 'stop'], default='start'),
+ name=dict(type='str'),
+ execution_input=dict(type='json', default={}),
+ state_machine_arn=dict(type='str'),
+ cause=dict(type='str', default=''),
+ error=dict(type='str', default=''),
+ execution_arn=dict(type='str')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('action', 'start', ['name', 'state_machine_arn']),
+ ('action', 'stop', ['execution_arn']),
+ ],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions')
+
+ action = module.params.get('action')
+ if action == "start":
+ start_execution(module, sfn_client)
+ else:
+ stop_execution(module, sfn_client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_condition.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_condition.py
new file mode 100644
index 00000000..006caaad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_condition.py
@@ -0,0 +1,733 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Will Thames
+# Copyright (c) 2015 Mike Mochan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: aws_waf_condition
+short_description: Create and delete WAF Conditions
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for WAF
+ U(https://aws.amazon.com/documentation/waf/)
+
+author:
+ - Will Thames (@willthames)
+ - Mike Mochan (@mmochan)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+options:
+ name:
+ description: Name of the Web Application Firewall condition to manage.
+ required: true
+ type: str
+ type:
+ description: The type of matching to perform.
+ choices:
+ - byte
+ - geo
+ - ip
+ - regex
+ - size
+ - sql
+ - xss
+ type: str
+ required: true
+ filters:
+ description:
+ - A list of the filters against which to match.
+ - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string).
+ - For I(type=geo), the only valid key is I(country).
+ - For I(type=ip), the only valid key is I(ip_address).
+ - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern).
+ - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size).
+ - For I(type=sql), valid keys are I(field_to_match) and I(transformation).
+ - For I(type=xss), valid keys are I(field_to_match) and I(transformation).
+ - Required when I(state=present).
+ type: list
+ elements: dict
+ suboptions:
+ field_to_match:
+ description:
+ - The field upon which to perform the match.
+ - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
+ type: str
+ choices: ['uri', 'query_string', 'header', 'method', 'body']
+ position:
+ description:
+ - Where in the field the match needs to occur.
+ - Only valid when I(type=byte).
+ type: str
+ choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word']
+ header:
+ description:
+ - Which specific header should be matched.
+ - Required when I(field_to_match=header).
+ - Valid when I(type=byte).
+ type: str
+ transformation:
+ description:
+ - A transform to apply on the field prior to performing the match.
+ - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
+ type: str
+ choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode']
+ country:
+ description:
+ - Value of geo constraint (typically a two letter country code).
+ - The only valid key when I(type=geo).
+ type: str
+ ip_address:
+ description:
+ - An IP Address or CIDR to match.
+ - The only valid key when I(type=ip).
+ type: str
+ regex_pattern:
+ description:
+ - A dict describing the regular expressions used to perform the match.
+ - Only valid when I(type=regex).
+ type: dict
+ suboptions:
+ name:
+ description: A name to describe the set of patterns.
+ type: str
+ regex_strings:
+ description: A list of regular expressions to match.
+ type: list
+ elements: str
+ comparison:
+ description:
+ - What type of comparison to perform.
+ - Only valid key when I(type=size).
+ type: str
+ choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']
+ size:
+ description:
+ - The size of the field (in bytes).
+ - Only valid key when I(type=size).
+ type: int
+ target_string:
+ description:
+ - The string to search for.
+ - May be up to 50 bytes.
+ - Valid when I(type=byte).
+ type: str
+ purge_filters:
+ description:
+ - Whether to remove existing filters from a condition if not passed in I(filters).
+ default: false
+ type: bool
+ waf_regional:
+ description: Whether to use waf-regional module.
+ default: false
+ required: no
+ type: bool
+ state:
+ description: Whether the condition should be C(present) or C(absent).
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+
+'''
+
+EXAMPLES = r'''
+ - name: create WAF byte condition
+ community.aws.aws_waf_condition:
+ name: my_byte_condition
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+
+ - name: create WAF geo condition
+ community.aws.aws_waf_condition:
+ name: my_geo_condition
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+
+ - name: create IP address condition
+ community.aws.aws_waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+
+ - name: create WAF regex condition
+ community.aws.aws_waf_condition:
+ name: my_regex_condition
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+
+ - name: create WAF size condition
+ community.aws.aws_waf_condition:
+ name: my_size_condition
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+
+ - name: create WAF sql injection condition
+ community.aws.aws_waf_condition:
+ name: my_sql_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+
+ - name: create WAF xss condition
+ community.aws.aws_waf_condition:
+ name: my_xss_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
+
+'''
+
+RETURN = r'''
+condition:
+ description: Condition returned by operation.
+ returned: always
+ type: complex
+ contains:
+ condition_id:
+ description: Type-agnostic ID for the condition.
+ returned: when state is present
+ type: str
+ sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
+ byte_match_set_id:
+ description: ID for byte match set.
+ returned: always
+ type: str
+ sample: c4882c96-837b-44a2-a762-4ea87dbf812b
+ byte_match_tuples:
+ description: List of byte match tuples.
+ returned: always
+ type: complex
+ contains:
+ field_to_match:
+ description: Field to match.
+ returned: always
+ type: complex
+ contains:
+ data:
+ description: Which specific header (if type is header).
+ type: str
+ sample: content-type
+ type:
+ description: Type of field
+ type: str
+ sample: HEADER
+ positional_constraint:
+ description: Position in the field to match.
+ type: str
+ sample: STARTS_WITH
+ target_string:
+ description: String to look for.
+ type: str
+ sample: Hello
+ text_transformation:
+ description: Transformation to apply to the field before matching.
+ type: str
+ sample: NONE
+ geo_match_constraints:
+ description: List of geographical constraints.
+ returned: when type is geo and state is present
+ type: complex
+ contains:
+ type:
+ description: Type of geo constraint.
+ type: str
+ sample: Country
+ value:
+ description: Value of geo constraint (typically a country code).
+ type: str
+ sample: AT
+ geo_match_set_id:
+ description: ID of the geo match set.
+ returned: when type is geo and state is present
+ type: str
+ sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
+ ip_set_descriptors:
+ description: list of IP address filters
+ returned: when type is ip and state is present
+ type: complex
+ contains:
+ type:
+ description: Type of IP address (IPV4 or IPV6).
+ returned: always
+ type: str
+ sample: IPV4
+ value:
+ description: IP address.
+ returned: always
+ type: str
+ sample: 10.0.0.0/8
+ ip_set_id:
+ description: ID of condition.
+ returned: when type is ip and state is present
+ type: str
+ sample: 78ad334a-3535-4036-85e6-8e11e745217b
+ name:
+ description: Name of condition.
+ returned: when state is present
+ type: str
+ sample: my_waf_condition
+ regex_match_set_id:
+ description: ID of the regex match set.
+ returned: when type is regex and state is present
+ type: str
+ sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
+ regex_match_tuples:
+ description: List of regex matches.
+ returned: when type is regex and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the regex match is applied.
+ type: complex
+ contains:
+ type:
+ description: The field name.
+ returned: when type is regex and state is present
+ type: str
+ sample: QUERY_STRING
+ regex_pattern_set_id:
+ description: ID of the regex pattern.
+ type: str
+ sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
+ text_transformation:
+ description: transformation applied to the text before matching
+ type: str
+ sample: NONE
+ size_constraint_set_id:
+ description: ID of the size constraint set.
+ returned: when type is size and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ size_constraints:
+ description: List of size constraints to apply.
+ returned: when type is size and state is present
+ type: complex
+ contains:
+ comparison_operator:
+ description: Comparison operator to apply.
+ type: str
+ sample: GT
+ field_to_match:
+ description: Field on which the size constraint is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name.
+ type: str
+ sample: QUERY_STRING
+ size:
+ description: Size to compare against the field.
+ type: int
+ sample: 300
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: NONE
+ sql_injection_match_set_id:
+ description: ID of the SQL injection match set.
+ returned: when type is sql and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ sql_injection_match_tuples:
+ description: List of SQL injection match sets.
+ returned: when type is sql and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the SQL injection match is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name.
+ type: str
+ sample: QUERY_STRING
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: URL_DECODE
+ xss_match_set_id:
+ description: ID of the XSS match set.
+ returned: when type is xss and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ xss_match_tuples:
+ description: List of XSS match sets.
+ returned: when type is xss and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the XSS match is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name
+ type: str
+ sample: QUERY_STRING
+ text_transformation:
+ description: transformation applied to the text before matching.
+ type: str
+ sample: URL_DECODE
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff, MATCH_LOOKUP
+from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff
+
+
+class Condition(object):
+
+ def __init__(self, client, module):
+ self.client = client
+ self.module = module
+ self.type = module.params['type']
+ self.method_suffix = MATCH_LOOKUP[self.type]['method']
+ self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
+ self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
+ self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
+ self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
+ self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
+ self.conditiontype = MATCH_LOOKUP[self.type]['type']
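+ # For example, for type 'ip' MATCH_LOOKUP yields method_suffix='ip_set',
+ # conditionset='IPSet', conditionsetid='IPSetId' and conditiontuple='IPSetDescriptor',
+ # so the generic code below can call e.g. get_ip_set()/update_ip_set() and
+ # index the matching keys in each response.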
+
+ def format_for_update(self, condition_set_id):
+ # Prep kwargs
+ kwargs = dict()
+ kwargs['Updates'] = list()
+
+ for filtr in self.module.params.get('filters'):
+ # Only for ip_set
+ if self.type == 'ip':
+ # there might be a better way of detecting an IPv6 address
+ if ':' in filtr.get('ip_address'):
+ ip_type = 'IPV6'
+ else:
+ ip_type = 'IPV4'
+ condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
+
+ # Specific for geo_match_set
+ if self.type == 'geo':
+ condition_insert = dict(Type='Country', Value=filtr.get('country'))
+
+ # Common For everything but ip_set and geo_match_set
+ if self.type not in ('ip', 'geo'):
+
+ condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
+ TextTransformation=filtr.get('transformation', 'none').upper())
+
+ if filtr.get('field_to_match').upper() == "HEADER":
+ if filtr.get('header'):
+ condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
+ else:
+ self.module.fail_json(msg="DATA required when HEADER requested")
+
+ # Specific for byte_match_set
+ if self.type == 'byte':
+ condition_insert['TargetString'] = filtr.get('target_string')
+ condition_insert['PositionalConstraint'] = filtr.get('position')
+
+ # Specific for size_constraint_set
+ if self.type == 'size':
+ condition_insert['ComparisonOperator'] = filtr.get('comparison')
+ condition_insert['Size'] = filtr.get('size')
+
+ # Specific for regex_match_set
+ if self.type == 'regex':
+ condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
+
+ kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
+
+ kwargs[self.conditionsetid] = condition_set_id
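+ # Illustration (not executed): for type 'ip' with filters=[{'ip_address': '10.0.0.0/8'}]
+ # this returns {'Updates': [{'Action': 'INSERT',
+ #                            'IPSetDescriptor': {'Type': 'IPV4', 'Value': '10.0.0.0/8'}}],
+ #               'IPSetId': condition_set_id}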
+ return kwargs
+
+ def format_for_deletion(self, condition):
+ return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
+ for current_condition_tuple in condition[self.conditiontuples]],
+ self.conditionsetid: condition[self.conditionsetid]}
+
+ @AWSRetry.exponential_backoff()
+ def list_regex_patterns_with_backoff(self, **params):
+ return self.client.list_regex_pattern_sets(**params)
+
+ @AWSRetry.exponential_backoff()
+ def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
+ return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
+
+ def list_regex_patterns(self):
+ # at time of writing (2017-11-20) no regex pattern paginator exists
+ regex_patterns = []
+ params = {}
+ while True:
+ try:
+ response = self.list_regex_patterns_with_backoff(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list regex patterns')
+ regex_patterns.extend(response['RegexPatternSets'])
+ if 'NextMarker' in response:
+ params['NextMarker'] = response['NextMarker']
+ else:
+ break
+ return regex_patterns
+
+ def get_regex_pattern_by_name(self, name):
+ existing_regex_patterns = self.list_regex_patterns()
+ regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
+ if name in regex_lookup:
+ return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
+ else:
+ return None
+
+ def ensure_regex_pattern_present(self, regex_pattern):
+ name = regex_pattern['name']
+
+ pattern_set = self.get_regex_pattern_by_name(name)
+ if not pattern_set:
+ pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
+ self.client.create_regex_pattern_set)['RegexPatternSet']
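+ # Reconcile pattern strings with the desired set: strings that are missing are
+ # INSERTed and surplus strings are DELETEd in a single update call.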
+ missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
+ extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
+ if not missing and not extra:
+ return pattern_set
+ updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
+ updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
+ self.client.update_regex_pattern_set, wait=True)
+ return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
+
+ def delete_unused_regex_pattern(self, regex_pattern_set_id):
+ try:
+ regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
+ updates = list()
+ for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
+ updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
+ self.client.update_regex_pattern_set)
+
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': regex_pattern_set_id},
+ self.client.delete_regex_pattern_set, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if e.response['Error']['Code'] == 'WAFNonexistentItemException':
+ return
+ self.module.fail_json_aws(e, msg='Could not delete regex pattern')
+
+ def get_condition_by_name(self, name):
+ all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
+ if all_conditions:
+ return all_conditions[0][self.conditionsetid]
+
+ @AWSRetry.exponential_backoff()
+ def get_condition_by_id_with_backoff(self, condition_set_id):
+ params = dict()
+ params[self.conditionsetid] = condition_set_id
+ func = getattr(self.client, 'get_' + self.method_suffix)
+ return func(**params)[self.conditionset]
+
+ def get_condition_by_id(self, condition_set_id):
+ try:
+ return self.get_condition_by_id_with_backoff(condition_set_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not get condition')
+
+ def list_conditions(self):
+ method = 'list_' + self.method_suffix + 's'
+ try:
+ paginator = self.client.get_paginator(method)
+ func = paginator.paginate().build_full_result
+ except botocore.exceptions.OperationNotPageableError:
+ # list_geo_match_sets and list_regex_match_sets do not have a paginator
+ func = getattr(self.client, method)
+ try:
+ return func()[self.conditionsets]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
+
+ def tidy_up_regex_patterns(self, regex_match_set):
+ all_regex_match_sets = self.list_conditions()
+ all_match_set_patterns = list()
+ for rms in all_regex_match_sets:
+ all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
+ for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
+ for filtr in regex_match_set[self.conditiontuples]:
+ if filtr['RegexPatternSetId'] not in all_match_set_patterns:
+ self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
+
+ def find_condition_in_rules(self, condition_set_id):
+ rules_in_use = []
+ try:
+ if self.client.__class__.__name__ == 'WAF':
+ all_rules = list_rules_with_backoff(self.client)
+ elif self.client.__class__.__name__ == 'WAFRegional':
+ all_rules = list_regional_rules_with_backoff(self.client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list rules')
+ for rule in all_rules:
+ try:
+ rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not get rule details')
+ if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
+ rules_in_use.append(rule_details['Name'])
+ return rules_in_use
+
+ def find_and_delete_condition(self, condition_set_id):
+ current_condition = self.get_condition_by_id(condition_set_id)
+ in_use_rules = self.find_condition_in_rules(condition_set_id)
+ if in_use_rules:
+ rulenames = ', '.join(in_use_rules)
+ self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
+ if current_condition[self.conditiontuples]:
+ # Filters are deleted using update with the DELETE action
+ func = getattr(self.client, 'update_' + self.method_suffix)
+ params = self.format_for_deletion(current_condition)
+ try:
+ # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
+ run_func_with_change_token_backoff(self.client, self.module, params, func)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not delete filters from condition')
+ func = getattr(self.client, 'delete_' + self.method_suffix)
+ params = dict()
+ params[self.conditionsetid] = condition_set_id
+ try:
+ run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not delete condition')
+ # tidy up regex patterns
+ if self.type == 'regex':
+ self.tidy_up_regex_patterns(current_condition)
+ return True, {}
+
+ def find_missing(self, update, current_condition):
+ missing = []
+ for desired in update['Updates']:
+ found = False
+ desired_condition = desired[self.conditiontuple]
+ current_conditions = current_condition[self.conditiontuples]
+ for condition in current_conditions:
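+ # compare_policies() returns True when the two dicts differ, so a falsy
+ # result means the existing tuple already matches the desired one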
+ if not compare_policies(condition, desired_condition):
+ found = True
+ if not found:
+ missing.append(desired)
+ return missing
+
+ def find_and_update_condition(self, condition_set_id):
+ current_condition = self.get_condition_by_id(condition_set_id)
+ update = self.format_for_update(condition_set_id)
+ missing = self.find_missing(update, current_condition)
+ if self.module.params.get('purge_filters'):
+ extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
+ for current_tuple in current_condition[self.conditiontuples]
+ if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
+ else:
+ extra = []
+ changed = bool(missing or extra)
+ if changed:
+ update['Updates'] = missing + extra
+ func = getattr(self.client, 'update_' + self.method_suffix)
+ try:
+ run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not update condition')
+ return changed, self.get_condition_by_id(condition_set_id)
+
+ def ensure_condition_present(self):
+ name = self.module.params['name']
+ condition_set_id = self.get_condition_by_name(name)
+ if condition_set_id:
+ return self.find_and_update_condition(condition_set_id)
+ else:
+ params = dict()
+ params['Name'] = name
+ func = getattr(self.client, 'create_' + self.method_suffix)
+ try:
+ condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not create condition')
+ return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
+
+ def ensure_condition_absent(self):
+ condition_set_id = self.get_condition_by_name(self.module.params['name'])
+ if condition_set_id:
+ return self.find_and_delete_condition(condition_set_id)
+ return False, {}
+
+
+def main():
+ filters_subspec = dict(
+ country=dict(),
+ field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
+ header=dict(),
+ transformation=dict(choices=['none', 'compress_white_space',
+ 'html_entity_decode', 'lowercase',
+ 'cmd_line', 'url_decode']),
+ position=dict(choices=['exactly', 'starts_with', 'ends_with',
+ 'contains', 'contains_word']),
+ comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
+ target_string=dict(), # Bytes
+ size=dict(type='int'),
+ ip_address=dict(),
+ regex_pattern=dict(),
+ )
+ argument_spec = dict(
+ name=dict(required=True),
+ type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
+ filters=dict(type='list', elements='dict'),
+ purge_filters=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['filters']]])
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+
+ condition = Condition(client, module)
+
+ if state == 'present':
+ (changed, results) = condition.ensure_condition_present()
+ # return a condition agnostic ID for use by aws_waf_rule
+ results['ConditionId'] = results[condition.conditionsetid]
+ else:
+ (changed, results) = condition.ensure_condition_absent()
+
+ module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_facts.py
new file mode 100644
index 00000000..98840668
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_facts.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_waf_info
+short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+version_added: 1.0.0
+description:
+ - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+ - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name of a Web Application Firewall.
+ type: str
+ waf_regional:
+ description: Whether to use the waf-regional module.
+ default: false
+ required: no
+ type: bool
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all WAF information
+ community.aws.aws_waf_info:
+
+- name: obtain all information for a single WAF
+ community.aws.aws_waf_info:
+ name: test_waf
+
+- name: obtain all information for a single WAF Regional
+ community.aws.aws_waf_info:
+ name: test_waf
+ waf_regional: true
+'''
+
+RETURN = '''
+wafs:
+ description: The WAFs that match the passed arguments.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: A friendly name or description of the WebACL.
+ returned: always
+ type: str
+ sample: test_waf
+ default_action:
+ description: The action to perform if none of the Rules contained in the WebACL match.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this WebACL.
+ returned: always
+ type: str
+ sample: test_waf_metric
+ rules:
+ description: An array that contains the action and priority for each Rule in a WebACL.
+ returned: always
+ type: complex
+ contains:
+ action:
+ description: The action to perform if the Rule matches.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this Rule.
+ returned: always
+ type: str
+ sample: ipblockrule
+ name:
+ description: A friendly name or description of the Rule.
+ returned: always
+ type: str
+ sample: ip_block_rule
+ predicates:
+ description: The Predicates list contains a Predicate for each
+ ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
+ object in a Rule.
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890",
+ "byte_match_tuples": [
+ {
+ "field_to_match": {
+ "type": "QUERY_STRING"
+ },
+ "positional_constraint": "STARTS_WITH",
+ "target_string": "bobbins",
+ "text_transformation": "NONE"
+ }
+ ],
+ "name": "bobbins",
+ "negated": false,
+ "type": "ByteMatch"
+ }
+ ]
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ waf_regional=dict(type='bool', default=False)
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_waf_facts':
+ module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", date='2021-12-01', collection_name='community.aws')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ web_acls = list_web_acls(client, module)
+ name = module.params['name']
+ if name:
+ web_acls = [web_acl for web_acl in web_acls if
+ web_acl['Name'] == name]
+ if not web_acls:
+ module.fail_json(msg="WAF named %s not found" % name)
+ module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
+ for web_acl in web_acls])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_info.py
new file mode 100644
index 00000000..98840668
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_info.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_waf_info
+short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+version_added: 1.0.0
+description:
+ - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+ - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name of a Web Application Firewall.
+ type: str
+ waf_regional:
+ description: Whether to use the waf-regional module.
+ default: false
+ required: no
+ type: bool
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all WAF information
+ community.aws.aws_waf_info:
+
+- name: obtain all information for a single WAF
+ community.aws.aws_waf_info:
+ name: test_waf
+
+- name: obtain all information for a single WAF Regional
+ community.aws.aws_waf_info:
+ name: test_waf
+ waf_regional: true
+'''
+
+RETURN = '''
+wafs:
+ description: The WAFs that match the passed arguments.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: A friendly name or description of the WebACL.
+ returned: always
+ type: str
+ sample: test_waf
+ default_action:
+ description: The action to perform if none of the Rules contained in the WebACL match.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this WebACL.
+ returned: always
+ type: str
+ sample: test_waf_metric
+ rules:
+ description: An array that contains the action and priority for each Rule in a WebACL.
+ returned: always
+ type: complex
+ contains:
+ action:
+ description: The action to perform if the Rule matches.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this Rule.
+ returned: always
+ type: str
+ sample: ipblockrule
+ name:
+ description: A friendly name or description of the Rule.
+ returned: always
+ type: str
+ sample: ip_block_rule
+ predicates:
+ description: The Predicates list contains a Predicate for each
+ ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
+ object in a Rule.
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890",
+ "byte_match_tuples": [
+ {
+ "field_to_match": {
+ "type": "QUERY_STRING"
+ },
+ "positional_constraint": "STARTS_WITH",
+ "target_string": "bobbins",
+ "text_transformation": "NONE"
+ }
+ ],
+ "name": "bobbins",
+ "negated": false,
+ "type": "ByteMatch"
+ }
+ ]
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ waf_regional=dict(type='bool', default=False)
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_waf_facts':
+ module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", date='2021-12-01', collection_name='community.aws')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ web_acls = list_web_acls(client, module)
+ name = module.params['name']
+ if name:
+ web_acls = [web_acl for web_acl in web_acls if
+ web_acl['Name'] == name]
+ if not web_acls:
+ module.fail_json(msg="WAF named %s not found" % name)
+ module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
+ for web_acl in web_acls])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_rule.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_rule.py
new file mode 100644
index 00000000..ce28559b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_rule.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Will Thames
+# Copyright (c) 2015 Mike Mochan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: aws_waf_rule
+short_description: Create and delete WAF Rules
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for WAF
+ U(https://aws.amazon.com/documentation/waf/).
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+options:
+ name:
+ description: Name of the Web Application Firewall rule.
+ required: yes
+ type: str
+ metric_name:
+ description:
+ - A friendly name or description for the metrics for the rule.
+ - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
+ - You can't change I(metric_name) after you create the rule.
+ - Defaults to the same as I(name) with disallowed characters removed.
+ type: str
+ state:
+ description: Whether the rule should be present or absent.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ conditions:
+ description: >
+ List of conditions used in the rule. M(community.aws.aws_waf_condition) can be used to
+ create new conditions.
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ required: true
+ type: str
+ choices: ['byte','geo','ip','regex','size','sql','xss']
+ description: The type of rule to match.
+ negated:
+ required: true
+ type: bool
+ description: Whether the condition should be negated.
+ condition:
+ required: true
+ type: str
+ description: The name of the condition. The condition must already exist.
+ purge_conditions:
+ description:
+ - Whether or not to remove conditions that are not passed when updating I(conditions).
+ default: false
+ type: bool
+ waf_regional:
+ description: Whether to use the waf-regional module.
+ default: false
+ required: false
+ type: bool
+'''
+
+EXAMPLES = r'''
+ - name: create WAF rule
+ community.aws.aws_waf_rule:
+ name: my_waf_rule
+ conditions:
+ - name: my_regex_condition
+ type: regex
+ negated: no
+ - name: my_geo_condition
+ type: geo
+ negated: no
+ - name: my_byte_condition
+ type: byte
+ negated: yes
+
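+ # Illustrative only: enforce that the rule contains exactly the listed
+ # conditions by purging any that are not named here.
+ - name: update WAF rule and remove unlisted conditions
+ community.aws.aws_waf_rule:
+ name: my_waf_rule
+ purge_conditions: yes
+ conditions:
+ - name: my_geo_condition
+ type: geo
+ negated: no
+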
+ - name: remove WAF rule
+ community.aws.aws_waf_rule:
+ name: "my_waf_rule"
+ state: absent
+'''
+
+RETURN = r'''
+rule:
+ description: WAF rule contents
+ returned: always
+ type: complex
+ contains:
+ metric_name:
+ description: Metric name for the rule.
+ returned: always
+ type: str
+ sample: ansibletest1234rule
+ name:
+ description: Friendly name for the rule.
+ returned: always
+ type: str
+ sample: ansible-test-1234_rule
+ predicates:
+ description: List of conditions used in the rule.
+ returned: always
+ type: complex
+ contains:
+ data_id:
+ description: ID of the condition.
+ returned: always
+ type: str
+ sample: 8251acdb-526c-42a8-92bc-d3d13e584166
+ negated:
+ description: Whether the sense of the condition is negated.
+ returned: always
+ type: bool
+ sample: false
+ type:
+ description: Type of the condition.
+ returned: always
+ type: str
+ sample: ByteMatch
+ rule_id:
+ description: ID of the WAF rule.
+ returned: always
+ type: str
+ sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
+'''
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waf import (
+ MATCH_LOOKUP,
+ list_regional_rules_with_backoff,
+ list_rules_with_backoff,
+ run_func_with_change_token_backoff,
+ get_web_acl_with_backoff,
+ list_web_acls_with_backoff,
+ list_regional_web_acls_with_backoff,
+)
+
+
+def get_rule_by_name(client, module, name):
+ rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
+ if rules:
+ return rules[0]
+
+
+def get_rule(client, module, rule_id):
+ try:
+ return client.get_rule(RuleId=rule_id)['Rule']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get WAF rule')
+
+
+def list_rules(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ return list_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF rules')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ return list_regional_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF Regional rules')
+
+
+def list_regional_rules(client, module):
+ try:
+ return list_regional_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF rules')
+
+
+def find_and_update_rule(client, module, rule_id):
+ rule = get_rule(client, module, rule_id)
+ rule_id = rule['RuleId']
+
+ existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
+ desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
+ all_conditions = dict()
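+ # Build three lookups keyed by condition type: every condition that exists in
+ # AWS (all_conditions, keyed by both name and DataId), the conditions requested
+ # in the task (desired_conditions, keyed by name) and the conditions currently
+ # attached to the rule (existing_conditions, keyed by DataId).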
+
+ for condition_type in MATCH_LOOKUP:
+ method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
+ all_conditions[condition_type] = dict()
+ try:
+ paginator = client.get_paginator(method)
+ func = paginator.paginate().build_full_result
+ except (KeyError, botocore.exceptions.OperationNotPageableError):
+ # list_geo_match_sets and list_regex_match_sets do not have a paginator
+ # and throw different exceptions
+ func = getattr(client, method)
+ try:
+ pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
+ for pred in pred_results:
+ pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
+ all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
+ all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
+
+ for condition in module.params['conditions']:
+ desired_conditions[condition['type']][condition['name']] = condition
+
+ reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
+ for condition in rule['Predicates']:
+ existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
+
+ insertions = list()
+ deletions = list()
+
+ for condition_type in desired_conditions:
+ for (condition_name, condition) in desired_conditions[condition_type].items():
+ if condition_name not in all_conditions[condition_type]:
+ module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
+ condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
+ if condition['data_id'] not in existing_conditions[condition_type]:
+ insertions.append(format_for_insertion(condition))
+
+ if module.params['purge_conditions']:
+ for condition_type in existing_conditions:
+ deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
+ if all_conditions[condition_type][condition['data_id']]['name'] not in desired_conditions[condition_type]])
+
+ changed = bool(insertions or deletions)
+ update = {
+ 'RuleId': rule_id,
+ 'Updates': insertions + deletions
+ }
+ if changed:
+ try:
+ run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update rule conditions')
+
+ return changed, get_rule(client, module, rule_id)
+
+
+def format_for_insertion(condition):
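+ # Illustration only: {'negated': False, 'type': 'byte', 'data_id': 'abcd-1234'}
+ # becomes {'Action': 'INSERT',
+ #          'Predicate': {'Negated': False, 'Type': 'ByteMatch', 'DataId': 'abcd-1234'}}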
+ return dict(Action='INSERT',
+ Predicate=dict(Negated=condition['negated'],
+ Type=MATCH_LOOKUP[condition['type']]['type'],
+ DataId=condition['data_id']))
+
+
+def format_for_deletion(condition):
+ return dict(Action='DELETE',
+ Predicate=dict(Negated=condition['negated'],
+ Type=condition['type'],
+ DataId=condition['data_id']))
+
+
+def remove_rule_conditions(client, module, rule_id):
+ conditions = get_rule(client, module, rule_id)['Predicates']
+ updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
+ try:
+ run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not remove rule conditions')
+
+
+def ensure_rule_present(client, module):
+ name = module.params['name']
+ rule_id = get_rule_by_name(client, module, name)
+ params = dict()
+ if rule_id:
+ return find_and_update_rule(client, module, rule_id)
+ else:
+ params['Name'] = module.params['name']
+ metric_name = module.params['metric_name']
+ if not metric_name:
+ metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
+ params['MetricName'] = metric_name
+ try:
+ new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not create rule')
+ return find_and_update_rule(client, module, new_rule['RuleId'])
+
+
+def find_rule_in_web_acls(client, module, rule_id):
+ web_acls_in_use = []
+ try:
+ if client.__class__.__name__ == 'WAF':
+ all_web_acls = list_web_acls_with_backoff(client)
+ elif client.__class__.__name__ == 'WAFRegional':
+ all_web_acls = list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list Web ACLs')
+ for web_acl in all_web_acls:
+ try:
+ web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACL details')
+ if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
+ web_acls_in_use.append(web_acl_details['Name'])
+ return web_acls_in_use
+
+
+def ensure_rule_absent(client, module):
+ rule_id = get_rule_by_name(client, module, module.params['name'])
+ in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
+ if in_use_web_acls:
+ web_acl_names = ', '.join(in_use_web_acls)
+ module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
+ (module.params['name'], web_acl_names))
+ if rule_id:
+ remove_rule_conditions(client, module, rule_id)
+ try:
+ return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not delete rule')
+ return False, {}
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ metric_name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ conditions=dict(type='list', elements='dict'),
+ purge_conditions=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ if state == 'present':
+ (changed, results) = ensure_rule_present(client, module)
+ else:
+ (changed, results) = ensure_rule_absent(client, module)
+
+ module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py
new file mode 100644
index 00000000..7cdf770a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: aws_waf_web_acl
+short_description: Create and delete WAF Web ACLs.
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for WAF
+ U(https://aws.amazon.com/documentation/waf/).
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+options:
+ name:
+ description: Name of the Web Application Firewall ACL to manage.
+ required: yes
+ type: str
+ default_action:
+ description: The action that you want AWS WAF to take when a request doesn't
+ match the criteria specified in any of the Rule objects that are associated with the WebACL.
+ choices:
+ - block
+ - allow
+ - count
+ type: str
+ state:
+ description: Whether the Web ACL should be present or absent.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ metric_name:
+ description:
+ - A friendly name or description for the metrics for this WebACL.
+ - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
+ - You can't change I(metric_name) after you create the WebACL.
+ - Metric name will default to I(name) with disallowed characters stripped out.
+ type: str
+ rules:
+ description:
+ - A list of rules that the Web ACL will enforce.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: Name of the rule.
+ type: str
+ required: true
+ action:
+ description: The action to perform.
+ type: str
+ required: true
+ priority:
+ description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
+ type: int
+ required: true
+ type:
+ description: The type of rule.
+ choices:
+ - rate_based
+ - regular
+ type: str
+ purge_rules:
+ description:
+ - Whether to remove rules that aren't passed with I(rules).
+ default: False
+ type: bool
+ waf_regional:
+ description: Whether to use the waf-regional module.
+ default: false
+ required: no
+ type: bool
+'''
+
+EXAMPLES = r'''
+ - name: create web ACL
+ community.aws.aws_waf_web_acl:
+ name: my_web_acl
+ rules:
+ - name: my_rule
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: yes
+ state: present
+
+ - name: delete the web acl
+ community.aws.aws_waf_web_acl:
+ name: my_web_acl
+ state: absent
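+
+ # Illustrative only: the same module manages regional Web ACLs when
+ # waf_regional is enabled.
+ - name: create a regional web ACL
+ community.aws.aws_waf_web_acl:
+ name: my_regional_web_acl
+ default_action: allow
+ rules:
+ - name: my_rule
+ priority: 1
+ action: count
+ waf_regional: yes
+ state: present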
+'''
+
+RETURN = r'''
+web_acl:
+ description: contents of the Web ACL.
+ returned: always
+ type: complex
+ contains:
+ default_action:
+ description: Default action taken by the Web ACL if no rules match.
+ returned: always
+ type: dict
+ sample:
+ type: BLOCK
+ metric_name:
+ description: Metric name used as an identifier.
+ returned: always
+ type: str
+ sample: mywebacl
+ name:
+ description: Friendly name of the Web ACL.
+ returned: always
+ type: str
+ sample: my web acl
+ rules:
+ description: List of rules.
+ returned: always
+ type: complex
+ contains:
+ action:
+ description: Action taken by the WAF when the rule matches.
+ returned: always
+ type: complex
+ sample:
+ type: ALLOW
+ priority:
+ description: Priority number of the rule (lower numbers are run first).
+ returned: always
+ type: int
+ sample: 2
+ rule_id:
+ description: Rule ID.
+ returned: always
+ type: str
+ sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
+ type:
+ description: Type of rule (either REGULAR or RATE_BASED).
+ returned: always
+ type: str
+ sample: REGULAR
+ web_acl_id:
+ description: Unique identifier of Web ACL.
+ returned: always
+ type: str
+ sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+import re
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waf import (
+ list_regional_rules_with_backoff,
+ list_regional_web_acls_with_backoff,
+ list_rules_with_backoff,
+ list_web_acls_with_backoff,
+ run_func_with_change_token_backoff,
+)
+
+
+def get_web_acl_by_name(client, module, name):
+ acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name]
+ if acls:
+ return acls[0]
+ return None
+
+
+def create_rule_lookup(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ rules = list_rules_with_backoff(client)
+ return dict((rule['Name'], rule) for rule in rules)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list rules')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ rules = list_regional_rules_with_backoff(client)
+ return dict((rule['Name'], rule) for rule in rules)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list regional rules')
+
+
+def get_web_acl(client, module, web_acl_id):
+ try:
+ return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
+
+
+def list_web_acls(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ return list_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACLs')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ return list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACLs')
+
+
+def find_and_update_web_acl(client, module, web_acl_id):
+ acl = get_web_acl(client, module, web_acl_id)
+ rule_lookup = create_rule_lookup(client, module)
+ existing_rules = acl['Rules']
+ desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
+ 'Priority': rule['priority'],
+ 'Action': {'Type': rule['action'].upper()},
+ 'Type': rule.get('type', 'regular').upper()}
+ for rule in module.params['rules']]
+ missing = [rule for rule in desired_rules if rule not in existing_rules]
+ extras = []
+ if module.params['purge_rules']:
+ extras = [rule for rule in existing_rules if rule not in desired_rules]
+
+ insertions = [format_for_update(rule, 'INSERT') for rule in missing]
+ deletions = [format_for_update(rule, 'DELETE') for rule in extras]
+ changed = bool(insertions + deletions)
+
+ # Purge rules before adding new ones in case a deletion shares the same
+ # priority as an insertion.
+ params = {
+ 'WebACLId': acl['WebACLId'],
+ 'DefaultAction': acl['DefaultAction']
+ }
+ change_tokens = []
+ if deletions:
+ try:
+ params['Updates'] = deletions
+ result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ change_tokens.append(result['ChangeToken'])
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=result['ChangeToken']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update Web ACL')
+ if insertions:
+ try:
+ params['Updates'] = insertions
+ result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ change_tokens.append(result['ChangeToken'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update Web ACL')
+ if change_tokens:
+ for token in change_tokens:
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=token
+ )
+ if changed:
+ acl = get_web_acl(client, module, web_acl_id)
+ return changed, acl
+
+
+def format_for_update(rule, action):
+ return dict(
+ Action=action,
+ ActivatedRule=dict(
+ Priority=rule['Priority'],
+ RuleId=rule['RuleId'],
+ Action=dict(
+ Type=rule['Action']['Type']
+ )
+ )
+ )
+
+
+def remove_rules_from_web_acl(client, module, web_acl_id):
+ acl = get_web_acl(client, module, web_acl_id)
+ deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
+ try:
+ params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions}
+ run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not remove rule')
+
+
+def ensure_web_acl_present(client, module):
+ changed = False
+ result = None
+ name = module.params['name']
+ web_acl_id = get_web_acl_by_name(client, module, name)
+ if web_acl_id:
+ (changed, result) = find_and_update_web_acl(client, module, web_acl_id)
+ else:
+ metric_name = module.params['metric_name']
+ if not metric_name:
+ metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
+ default_action = module.params['default_action'].upper()
+ try:
+ params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
+ new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not create Web ACL')
+ (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
+ return changed, result
+
+
+def ensure_web_acl_absent(client, module):
+ web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
+ if web_acl_id:
+ web_acl = get_web_acl(client, module, web_acl_id)
+ if web_acl['Rules']:
+ remove_rules_from_web_acl(client, module, web_acl_id)
+ try:
+ run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
+ return True, {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not delete Web ACL')
+ return False, {}
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ default_action=dict(choices=['block', 'allow', 'count']),
+ metric_name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ rules=dict(type='list', elements='dict'),
+ purge_rules=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False)
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['default_action', 'rules']]])
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ if state == 'present':
+ (changed, results) = ensure_web_acl_present(client, module)
+ else:
+ (changed, results) = ensure_web_acl_absent(client, module)
+
+ module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
new file mode 100644
index 00000000..8eab5325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: cloudformation_exports_info
+short_description: Read a value from CloudFormation Exports
+version_added: 1.0.0
+description:
+ - This module retrieves values from CloudFormation Exports.
+requirements: ['boto3 >= 1.11.15']
+author:
+ - "Michael Moyle (@mmoyle)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get Exports
+ community.aws.cloudformation_exports_info:
+ profile: 'my_aws_profile'
+ region: 'my_region'
+ register: cf_exports
+- ansible.builtin.debug:
+ msg: "{{ cf_exports }}"
+'''
+
+RETURN = '''
+export_items:
+ description: A dictionary of export item names and values.
+ returned: always
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+try:
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import BotoCoreError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def list_exports(cloudformation_client):
+ '''Get Exports Names and Values and return in dictionary '''
+ list_exports_paginator = cloudformation_client.get_paginator('list_exports')
+ exports = list_exports_paginator.paginate().build_full_result()['Exports']
+ export_items = dict()
+
+ for item in exports:
+ export_items[item['Name']] = item['Value']
+
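+ # Illustration only: two exports named 'VpcId' and 'SubnetId' would come back
+ # as {'VpcId': 'vpc-0123456789abcdef0', 'SubnetId': 'subnet-0123456789abcdef0'}
+ # (values are placeholders).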
+ return export_items
+
+
+def main():
+ argument_spec = dict()
+ result = dict(
+ changed=False,
+ original_message=''
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+ cloudformation_client = module.client('cloudformation')
+
+ try:
+ result['export_items'] = list_exports(cloudformation_client)
+
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
new file mode 100644
index 00000000..148cbe61
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
@@ -0,0 +1,725 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cloudformation_stack_set
+version_added: 1.0.0
+short_description: Manage groups of CloudFormation stacks
+description:
+ - Launches/updates/deletes AWS CloudFormation Stack Sets.
+notes:
+ - To make an individual stack, you want the M(amazon.aws.cloudformation) module.
+options:
+ name:
+ description:
+ - Name of the CloudFormation stack set.
+ required: true
+ type: str
+ description:
+ description:
+ - A description of what this stack set creates.
+ type: str
+ parameters:
+ description:
+ - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created. If I(state=present) and if stack exists and template has changed, it will be updated.
+ If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: path
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ template_url:
+ description:
+ - Location of file containing the template body.
+ - The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
+ as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ purge_stacks:
+ description:
+ - Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
+ - By default, instances will be deleted. To keep stacks when the stack set is deleted, set I(purge_stacks=false).
+ type: bool
+ default: true
+ wait:
+ description:
+ - Whether or not to wait for stack operation to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
+ - If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for stacks to complete create/update/delete operations.
+ default: 900
+ type: int
+ capabilities:
+ description:
+ - Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
+ - Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
+ - >
+ The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
+ AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
+ type: list
+ elements: str
+ choices:
+ - 'CAPABILITY_IAM'
+ - 'CAPABILITY_NAMED_IAM'
+ regions:
+ description:
+ - A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
+ specifies the region for stack instances.
+ - At least one region must be specified to create a stack set. On updates, if fewer regions are specified only the specified regions will
+ have their stack instances updated.
+ type: list
+ elements: str
+ accounts:
+ description:
+ - A list of AWS accounts in which to create instances of CloudFormation stacks.
+ - At least one account must be specified to create a stack set. On updates, if fewer accounts are specified only the specified accounts will
+ have their stack instances updated.
+ type: list
+ elements: str
+ administration_role_arn:
+ description:
+ - ARN of the administration role, meaning the role that CloudFormation Stack Sets use to assume the roles in your child accounts.
+ - This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
+ account number of the current IAM role/user/STS credentials.
+ aliases:
+ - admin_role_arn
+ - admin_role
+ - administration_role
+ type: str
+ execution_role_name:
+ description:
+ - ARN of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
+ - This MUST NOT be an ARN, and the roles must exist in each child account specified.
+ - The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole).
+ aliases:
+ - exec_role_name
+ - exec_role
+ - execution_role
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ type: dict
+ failure_tolerance:
+ description:
+ - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
+ type: dict
+ suboptions:
+ fail_count:
+ description:
+ - The number of accounts, per region, for which this operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ type: int
+ fail_percentage:
+ type: int
+ description:
+ - The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ parallel_percentage:
+ type: int
+ description:
+ - The maximum percentage of accounts in which to perform this operation at one time.
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual percentage may be lower.
+ parallel_count:
+ type: int
+ description:
+ - The maximum number of accounts in which to perform this operation at one time.
+ - I(parallel_count) may be at most one more than the I(fail_count).
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual count may be lower.
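+  # A sketch of playbook input (values are illustrative; fail_count excludes
+  # fail_percentage, just as parallel_count excludes parallel_percentage):
+  #   failure_tolerance:
+  #     fail_count: 2
+  #     parallel_percentage: 20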
+
+author: "Ryan Scott Brown (@ryansb)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3>=1.6, botocore>=1.10.26 ]
+'''
+
+EXAMPLES = r'''
+- name: Create a stack set with instances in two accounts
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ description: Test stack in two accounts
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
+
+- name: On subsequent calls, templates are optional but parameters and tags can be altered
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ state: present
+ parameters:
+ InstanceName: my_stacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
+
+- name: The same type of update, but wait for the update to complete in all stacks
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ state: present
+ wait: true
+ parameters:
+ InstanceName: my_restacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
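+
+# A sketch (account IDs and regions are placeholders): delete the stack set,
+# purging its stack instances from the listed accounts and regions first.
+- name: Delete the stack set and all of its stack instances
+  community.aws.cloudformation_stack_set:
+    name: my-stack
+    state: absent
+    purge_stacks: true
+    accounts: [1234567890, 2345678901]
+    regions:
+      - us-east-1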
+'''
+
+RETURN = r'''
+operations_log:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample:
+ - action: CREATE
+ creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
+ end_timestamp: '2018-06-18T17:41:24.560000+00:00'
+ operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
+ status: FAILED
+ stack_instances:
+ - account: '1234567890'
+ region: us-east-1
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
+
+operations:
+ description: All operations initiated by this run of the cloudformation_stack_set module
+ returned: always
+ type: list
+ sample:
+ - action: CREATE
+ administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
+ creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
+ end_timestamp: '2018-06-18T17:41:24.560000+00:00'
+ execution_role_name: AWSCloudFormationStackSetExecutionRole
+ operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
+ operation_preferences:
+ region_order:
+ - us-east-1
+ - us-east-2
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: FAILED
+stack_instances:
+ description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID.
+ returned: state == present
+ type: list
+ sample:
+ - account: '1234567890'
+ region: us-east-1
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: >
+ Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
+ - account: '1234567890'
+ region: us-east-2
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: Cancelled since failure tolerance has exceeded
+stack_set:
+ type: dict
+ description: Facts about the currently deployed stack set, its parameters, and its tags
+ returned: state == present
+ sample:
+ administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
+ capabilities: []
+ description: test stack PRIME
+ execution_role_name: AWSCloudFormationStackSetExecutionRole
+ parameters: []
+    stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ stack_set_name: TestStackPrime
+ status: ACTIVE
+ tags:
+ Some: Thing
+ an: other
+ template_body: |
+ AWSTemplateFormatVersion: "2010-09-09"
+ Parameters: {}
+ Resources:
+ Bukkit:
+ Type: "AWS::S3::Bucket"
+ Properties: {}
+ other:
+ Type: "AWS::SNS::Topic"
+ Properties: {}
+
+''' # NOQA
+
+import time
+import datetime
+import uuid
+import itertools
+
+try:
+ import boto3
+ import botocore.exceptions
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict,
+ )
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils._text import to_native
+
+
+def create_stack_set(module, stack_params, cfn):
+ try:
+ cfn.create_stack_set(aws_retry=True, **stack_params)
+ return await_stack_set_exists(cfn, stack_params['StackSetName'])
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName')))
+
+
+def update_stack_set(module, stack_params, cfn):
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack_set(**stack_params)
+ except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
+ except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
+ "the `accounts` and `regions` parameters.")
+ except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ err, msg="Another operation is already in progress on this stack set - please try again later. When making "
+ "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.")
+ except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="Could not update stack set.")
+ if module.params.get('wait'):
+ await_stack_set_operation(
+ module, cfn, operation_id=stack_params['OperationId'],
+ stack_set_name=stack_params['StackSetName'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+
+ return True
+
+
+def compare_stack_instances(cfn, stack_set_name, accounts, regions):
+ instance_list = cfn.list_stack_instances(
+ aws_retry=True,
+ StackSetName=stack_set_name,
+ )['Summaries']
+ desired_stack_instances = set(itertools.product(accounts, regions))
+ existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list)
+ # new stacks, existing stacks, unspecified stacks
+ return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances)
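+# For example (hypothetical values): accounts=['111'] and
+# regions=['us-east-1', 'us-east-2'] with one existing instance in
+# ('111', 'us-east-1') return ({('111', 'us-east-2')},
+# {('111', 'us-east-1')}, set()).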
+
+
+@AWSRetry.backoff(tries=3, delay=4)
+def stack_set_facts(cfn, stack_set_name):
+ try:
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet']
+ ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
+ return ss
+ except cfn.exceptions.from_code('StackSetNotFound'):
+ # Return None if the stack doesn't exist
+ return
+
+
+def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
+ wait_start = datetime.datetime.now()
+ operation = None
+ for i in range(max_wait // 15):
+ try:
+ operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
+ if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
+ # Stack set has completed operation
+ break
+ except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ pass
+ except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except
+ pass
+ time.sleep(15)
+
+ if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
+ await_stack_instance_completion(
+ module, cfn,
+ stack_set_name=stack_set_name,
+ # subtract however long we waited already
+ max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
+ )
+ elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
+ pass
+ else:
+ module.warn(
+ "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
+ operation_id, stack_set_name, max_wait
+ )
+ )
+
+
+def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
+ to_await = None
+ for i in range(max_wait // 15):
+ try:
+ stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
+ to_await = [inst for inst in stack_instances['Summaries']
+ if inst['Status'] != 'CURRENT']
+ if not to_await:
+ return stack_instances['Summaries']
+ except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ # this means the deletion beat us, or the stack set is not yet propagated
+ pass
+ time.sleep(15)
+
+ module.warn(
+ "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
+ stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait
+ )
+ )
+
+
+def await_stack_set_exists(cfn, stack_set_name):
+ # AWSRetry will retry on `StackSetNotFound` errors for us
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet']
+ ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
+ return camel_dict_to_snake_dict(ss, ignore_list=('Tags',))
+
+
+def describe_stack_tree(module, stack_set_name, operation_ids=None):
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
+ cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
+ result = dict()
+ result['stack_set'] = camel_dict_to_snake_dict(
+ cfn.describe_stack_set(
+ StackSetName=stack_set_name,
+ aws_retry=True,
+ )['StackSet']
+ )
+ result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
+ result['operations_log'] = sorted(
+ camel_dict_to_snake_dict(
+ cfn.list_stack_set_operations(
+ StackSetName=stack_set_name,
+ aws_retry=True,
+ )
+ )['summaries'],
+ key=lambda x: x['creation_timestamp']
+ )
+ result['stack_instances'] = sorted(
+ [
+ camel_dict_to_snake_dict(i) for i in
+ cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
+ ],
+ key=lambda i: i['region'] + i['account']
+ )
+
+ if operation_ids:
+ result['operations'] = []
+ for op_id in operation_ids:
+ try:
+ result['operations'].append(camel_dict_to_snake_dict(
+ cfn.describe_stack_set_operation(
+ StackSetName=stack_set_name,
+ OperationId=op_id,
+ )['StackSetOperation']
+ ))
+ except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except
+ pass
+ return result
+
+
+def get_operation_preferences(module):
+ params = dict()
+ if module.params.get('regions'):
+ params['RegionOrder'] = list(module.params['regions'])
+ for param, api_name in {
+ 'fail_count': 'FailureToleranceCount',
+ 'fail_percentage': 'FailureTolerancePercentage',
+ 'parallel_percentage': 'MaxConcurrentPercentage',
+ 'parallel_count': 'MaxConcurrentCount',
+ }.items():
+ if module.params.get('failure_tolerance', {}).get(param):
+ params[api_name] = module.params.get('failure_tolerance', {}).get(param)
+ return params
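+# For example (hypothetical module input): regions=['us-east-1'] and
+# failure_tolerance={'fail_count': 2, 'parallel_percentage': 20} yield
+# {'RegionOrder': ['us-east-1'], 'FailureToleranceCount': 2,
+#  'MaxConcurrentPercentage': 20}.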
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=900),
+ state=dict(default='present', choices=['present', 'absent']),
+ purge_stacks=dict(type='bool', default=True),
+ parameters=dict(type='dict', default={}),
+ template=dict(type='path'),
+ template_url=dict(),
+ template_body=dict(),
+ capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
+ regions=dict(type='list', elements='str'),
+ accounts=dict(type='list', elements='str'),
+ failure_tolerance=dict(
+ type='dict',
+ default={},
+ options=dict(
+ fail_count=dict(type='int'),
+ fail_percentage=dict(type='int'),
+ parallel_percentage=dict(type='int'),
+ parallel_count=dict(type='int'),
+ ),
+ mutually_exclusive=[
+ ['fail_count', 'fail_percentage'],
+ ['parallel_count', 'parallel_percentage'],
+ ],
+ ),
+ administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
+ execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
+ tags=dict(type='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body']],
+ supports_check_mode=True
+ )
+ if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
+ module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
+ cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
+ existing_stack_set = stack_set_facts(cfn, module.params['name'])
+
+ operation_uuid = to_native(uuid.uuid4())
+ operation_ids = []
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {}
+ state = module.params['state']
+ if state == 'present' and not module.params['accounts']:
+ module.fail_json(
+ msg="Can't create a stack set without choosing at least one account. "
+ "To get the ID of the current account, use the aws_caller_info module."
+ )
+
+    if module.params['accounts']:
+        module.params['accounts'] = [to_native(a) for a in module.params['accounts']]
+
+ stack_params['StackSetName'] = module.params['name']
+ if module.params.get('description'):
+ stack_params['Description'] = module.params['description']
+
+ if module.params.get('capabilities'):
+ stack_params['Capabilities'] = module.params['capabilities']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as tpl:
+ stack_params['TemplateBody'] = tpl.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+ else:
+ # no template is provided, but if the stack set exists already, we can use the existing one.
+ if existing_stack_set:
+ stack_params['UsePreviousTemplate'] = True
+ else:
+ module.fail_json(
+ msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
+ "`template_body`, or `template_url`".format(module.params['name'])
+ )
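+    # Note: `template`, `template_body` and `template_url` are mutually exclusive
+    # (enforced in the AnsibleAWSModule constructor above); when none is supplied,
+    # an existing stack set re-uses its current template via UsePreviousTemplate.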
+
+ stack_params['Parameters'] = []
+ for k, v in module.params['parameters'].items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = to_native(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
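+    # For example (hypothetical playbook input):
+    #   parameters:
+    #     InstanceName: my_stacked_instance       # plain key/value pair
+    #     KeyPair: {use_previous_value: true}     # dict form -> UsePreviousValue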
+
+ if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('administration_role_arn'):
+ # TODO loosen the semantics here to autodetect the account ID and build the ARN
+ stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
+ if module.params.get('execution_role_name'):
+ stack_params['ExecutionRoleName'] = module.params['execution_role_name']
+
+ result = {}
+
+ if module.check_mode:
+ if state == 'absent' and existing_stack_set:
+ module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
+ elif state == 'absent' and not existing_stack_set:
+ module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
+ elif state == 'present' and not existing_stack_set:
+ module.exit_json(changed=True, msg='New stack set would be created', meta=[])
+ elif state == 'present' and existing_stack_set:
+ new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
+ cfn,
+ module.params['name'],
+ module.params['accounts'],
+ module.params['regions'],
+ )
+ if new_stacks:
+ module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
+            elif unspecified_stacks and module.params.get('purge_stacks'):
+ module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
+ else:
+ # TODO: need to check the template and other settings for correct check mode
+ module.exit_json(changed=False, msg='No changes detected', meta=[])
+
+ changed = False
+ if state == 'present':
+ if not existing_stack_set:
+ # on create this parameter has a different name, and cannot be referenced later in the job log
+ stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
+ changed = True
+ create_stack_set(module, stack_params, cfn)
+ else:
+ stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
+ operation_ids.append(stack_params['OperationId'])
+ if module.params.get('regions'):
+ stack_params['OperationPreferences'] = get_operation_preferences(module)
+ changed |= update_stack_set(module, stack_params, cfn)
+
+ # now create/update any appropriate stack instances
+ new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
+ cfn,
+ module.params['name'],
+ module.params['accounts'],
+ module.params['regions'],
+ )
+ if new_stack_instances:
+ operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
+ changed = True
+ cfn.create_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=list(set(acct for acct, region in new_stack_instances)),
+ Regions=list(set(region for acct, region in new_stack_instances)),
+ OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ else:
+ operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
+ cfn.update_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=list(set(acct for acct, region in existing_stack_instances)),
+ Regions=list(set(region for acct, region in existing_stack_instances)),
+ OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ for op in operation_ids:
+ await_stack_set_operation(
+ module, cfn, operation_id=op,
+ stack_set_name=module.params['name'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+
+ elif state == 'absent':
+ if not existing_stack_set:
+ module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
+        # Stack instance deletion (honouring purge_stacks via RetainStacks) is
+        # handled below when delete_stack_set raises StackSetNotEmptyException.
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
+ except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
+ except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except
+ delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
+ cfn.delete_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=module.params['accounts'],
+ Regions=module.params['regions'],
+ RetainStacks=(not module.params.get('purge_stacks')),
+ OperationId=delete_instances_op
+ )
+ await_stack_set_operation(
+ module, cfn, operation_id=delete_instances_op,
+ stack_set_name=stack_params['StackSetName'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+ except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except
+ # this time, it is likely that either the delete failed or there are more stacks.
+ instances = cfn.list_stack_instances(
+ StackSetName=module.params['name'],
+ )
+ stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
+ module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
+ module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
+
+ result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
+ if any(o['status'] == 'FAILED' for o in result['operations']):
+ module.fail_json(msg="One or more operations failed to execute", **result)
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
new file mode 100644
index 00000000..a48f687a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
@@ -0,0 +1,2258 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+
+version_added: 1.0.0
+module: cloudfront_distribution
+
+short_description: Create, update and delete AWS CloudFront distributions.
+
+description:
+ - Allows for easy creation, updating and deletion of CloudFront distributions.
+
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+
+
+author:
+ - Willem van Ketwich (@wilvk)
+ - Will Thames (@willthames)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+options:
+
+ state:
+ description:
+ - The desired state of the distribution.
+ - I(state=present) creates a new distribution or updates an existing distribution.
+ - I(state=absent) deletes an existing distribution.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ distribution_id:
+ description:
+ - The ID of the CloudFront distribution.
+ - This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
+ type: str
+
+ e_tag:
+ description:
+ - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
+ - Is determined automatically if not specified.
+ type: str
+
+ caller_reference:
+ description:
+ - A unique identifier for creating and updating CloudFront distributions.
+      - Each caller reference must be unique across all distributions, e.g. a caller reference used in a web
+ distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
+ to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
+ C(YYYY-MM-DDTHH:MM:SS.ffffff).
+ type: str
+
+ tags:
+ description:
+ - Should be input as a dict of key-value pairs.
+ - "Note that numeric keys or values must be wrapped in quotes. e.g. C(Priority: '1')"
+ type: dict
+
+ purge_tags:
+ description:
+ - Specifies whether existing tags will be removed before adding new tags.
+ - When I(purge_tags=yes), existing tags are removed and I(tags) are added, if specified.
+ If no tags are specified, it removes all existing tags for the distribution.
+ - When I(purge_tags=no), existing tags are kept and I(tags) are added, if specified.
+ default: false
+ type: bool
+
+ alias:
+ description:
+      - The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias, as an alias can only
+        be used by one distribution per AWS account. This parameter avoids having to provide the I(distribution_id), I(e_tag)
+        or I(caller_reference) of an existing distribution.
+ type: str
+
+ aliases:
+ description:
+ - A list of domain name aliases (CNAMEs) as strings to be used for the distribution.
+      - Each alias must be unique across all distributions for the AWS account.
+ type: list
+ elements: str
+
+ purge_aliases:
+ description:
+ - Specifies whether existing aliases will be removed before adding new aliases.
+ - When I(purge_aliases=yes), existing aliases are removed and I(aliases) are added.
+ default: false
+ type: bool
+
+ default_root_object:
+ description:
+ - A config element that specifies the path to request when the user requests the origin.
+ - e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is called by the user.
+ - This prevents the entire distribution origin from being exposed at the root.
+ type: str
+
+ default_origin_domain_name:
+ description:
+ - The domain name to use for an origin if no I(origins) have been specified.
+ - Should only be used on a first run of generating a distribution and not on
+ subsequent runs.
+ - Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
+ type: str
+
+ default_origin_path:
+ description:
+ - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
+ type: str
+
+ origins:
+ type: list
+ elements: dict
+ description:
+ - A config element that is a list of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
+ suboptions:
+ id:
+ description: A unique identifier for the origin or origin group. I(id) must be unique within the distribution.
+ type: str
+ domain_name:
+ description:
+ - The domain name which CloudFront will query as the origin.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName)
+ type: str
+ origin_path:
+ description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.
+ type: str
+ custom_headers:
+ description:
+ - Custom headers you wish to add to the request before passing it to the origin.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html).
+ type: list
+ elements: dict
+ suboptions:
+ header_name:
+ description: The name of a header that you want CloudFront to forward to your origin.
+ type: str
+ header_value:
+ description: The value for the header that you specified in the I(header_name) field.
+ type: str
+ s3_origin_access_identity_enabled:
+ description:
+ - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront.
+ - Will automatically create an Identity for you.
+ - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html).
+ type: bool
+ custom_origin_config:
+ description: Connection information about the origin.
+ type: dict
+ suboptions:
+ http_port:
+ description: The HTTP port the custom origin listens on.
+ type: int
+ https_port:
+ description: The HTTPS port the custom origin listens on.
+ type: int
+ origin_protocol_policy:
+ description: The origin protocol policy to apply to your origin.
+ type: str
+ origin_ssl_protocols:
+ description: A list of SSL/TLS protocols that you want CloudFront to use when communicating to the origin over HTTPS.
+ type: list
+ elements: str
+ origin_read_timeout:
+ description: A timeout (in seconds) when reading from your origin.
+ type: int
+ origin_keepalive_timeout:
+ description: A keep-alive timeout (in seconds).
+ type: int
+
+ purge_origins:
+ description: Whether to remove any origins that aren't listed in I(origins).
+ default: false
+ type: bool
+
+ default_cache_behavior:
+ type: dict
+ description:
+ - A dict specifying the default cache behavior of the distribution.
+      - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid
+        cache behavior in I(cache_behaviors), with default values for the remaining settings.
+ suboptions:
+ target_origin_id:
+ description:
+ - The ID of the origin that you want CloudFront to route requests to
+ by default.
+ type: str
+ forwarded_values:
+ description:
+ - A dict that specifies how CloudFront handles query strings and cookies.
+ type: dict
+ suboptions:
+ query_string:
+ description:
+ - Indicates whether you want CloudFront to forward query strings
+ to the origin that is associated with this cache behavior.
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers use a list containing a single element '*' (C(['*']))
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(target_origin_id)
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+ cache_behaviors:
+ type: list
+ elements: dict
+ description:
+ - A list of dictionaries describing the cache behaviors for the distribution.
+ - The order of the list is preserved across runs unless I(purge_cache_behaviors) is enabled.
+ suboptions:
+ path_pattern:
+ description:
+ - The pattern that specifies which requests to apply the behavior to.
+ type: str
+ target_origin_id:
+ description:
+ - The ID of the origin that you want CloudFront to route requests to
+ by default.
+ type: str
+ forwarded_values:
+ description:
+ - A dict that specifies how CloudFront handles query strings and cookies.
+ type: dict
+ suboptions:
+ query_string:
+ description:
+ - Indicates whether you want CloudFront to forward query strings
+ to the origin that is associated with this cache behavior.
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers use a list containing a single element '*' (C(['*']))
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(path_pattern) and I(target_origin_id)
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id) when a request matches I(path_pattern).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+
+ purge_cache_behaviors:
+ description:
+ - Whether to remove any cache behaviors that aren't listed in I(cache_behaviors).
+ - This switch also allows the reordering of I(cache_behaviors).
+ default: false
+ type: bool
+
+ custom_error_responses:
+ type: list
+ elements: dict
+ description:
+      - A config element that is a list of complex custom error responses to be specified for the distribution.
+      - This attribute configures custom HTTP error messages returned to the user.
+ suboptions:
+ error_code:
+ type: int
+ description: The error code the custom error page is for.
+ error_caching_min_ttl:
+ type: int
+ description: The length of time (in seconds) that CloudFront will cache status codes for.
+ response_code:
+ type: int
+ description:
+ - The HTTP status code that CloudFront should return to a user when the origin returns the HTTP status code specified by I(error_code).
+ response_page_path:
+ type: str
+ description:
+ - The path to the custom error page that you want CloudFront to return to a viewer when your origin returns
+ the HTTP status code specified by I(error_code).
+
+ purge_custom_error_responses:
+ description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses).
+ default: false
+ type: bool
+
+ comment:
+ description:
+ - A comment that describes the CloudFront distribution.
+ - If not specified, it defaults to a generic message that it has been created with Ansible, and a datetime stamp.
+ type: str
+
+ logging:
+ description:
+ - A config element that is a complex object that defines logging for the distribution.
+ suboptions:
+ enabled:
+ description: When I(enabled=true) CloudFront will log access to an S3 bucket.
+ type: bool
+ include_cookies:
+ description: When I(include_cookies=true) CloudFront will include cookies in the logs.
+ type: bool
+ bucket:
+ description: The S3 bucket to store the log in.
+ type: str
+ prefix:
+ description: A prefix to include in the S3 object names.
+ type: str
+ type: dict
+
+ price_class:
+ description:
+ - A string that specifies the pricing class of the distribution. As per
+ U(https://aws.amazon.com/cloudfront/pricing/)
+ - I(price_class=PriceClass_100) consists of the areas United States, Canada and Europe.
+ - I(price_class=PriceClass_200) consists of the areas United States, Canada, Europe, Japan, India,
+ Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - I(price_class=PriceClass_All) consists of the areas United States, Canada, Europe, Japan, India,
+ South America, Australia, Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - AWS defaults this to C(PriceClass_All).
+ - Valid values are C(PriceClass_100), C(PriceClass_200) and C(PriceClass_All)
+ type: str
+
+ enabled:
+ description:
+ - A boolean value that specifies whether the distribution is enabled or disabled.
+ - Defaults to C(false).
+ type: bool
+
+ viewer_certificate:
+ type: dict
+ description:
+ - A dict that specifies the encryption details of the distribution.
+ suboptions:
+ cloudfront_default_certificate:
+ type: bool
+ description:
+          - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net),
+            you should set I(cloudfront_default_certificate=true).
+ - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method).
+ iam_certificate_id:
+ type: str
+ description:
+ - The ID of a certificate stored in IAM to use for HTTPS connections.
+ - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method).
+ acm_certificate_arn:
+ type: str
+ description:
+          - The ARN of a certificate stored in ACM to use for HTTPS connections.
+          - If I(acm_certificate_arn) is set then you must also specify I(ssl_support_method).
+ ssl_support_method:
+ type: str
+ description:
+ - How CloudFront should serve SSL certificates.
+ - Valid values are C(sni-only) for SNI, and C(vip) if CloudFront is configured to use a dedicated IP for your content.
+ minimum_protocol_version:
+ type: str
+ description:
+ - The security policy that you want CloudFront to use for HTTPS connections.
+ - See U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html)
+ for supported security policies.
+
+ restrictions:
+ type: dict
+ description:
+    - A config element that is a complex object that describes how a distribution should restrict its content.
+ suboptions:
+ geo_restriction:
+ description: Apply a restriction based on the location of the requester.
+ type: dict
+ suboptions:
+ restriction_type:
+ type: str
+ description:
+ - The method that you want to use to restrict distribution of your content by country.
+ - Valid values are C(none), C(whitelist), C(blacklist).
+ items:
+ description:
+ - A list of ISO 3166-1 two letter (Alpha 2) country codes that the
+ restriction should apply to.
+ - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/).'
+ type: list
+
+ web_acl_id:
+ description:
+ - The ID of a Web Application Firewall (WAF) Access Control List (ACL).
+ type: str
+
+ http_version:
+ description:
+    - The version of the HTTP protocol to use for the distribution.
+ - AWS defaults this to C(http2).
+ - Valid values are C(http1.1) and C(http2).
+ type: str
+
+ ipv6_enabled:
+ description:
+ - Determines whether IPv6 support is enabled or not.
+ - Defaults to C(false).
+ type: bool
+
+ wait:
+ description:
+ - Specifies whether the module waits until the distribution has completed processing the creation or update.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+    - Specifies the duration in seconds to wait for a timeout of a CloudFront create or update.
+ default: 1800
+ type: int
+
+'''
+
+EXAMPLES = r'''
+- name: create a basic distribution with defaults and tags
+ community.aws.cloudfront_distribution:
+ state: present
+ default_origin_domain_name: www.my-cloudfront-origin.com
+ tags:
+ Name: example distribution
+ Project: example project
+ Priority: '1'
+
+- name: update a distribution comment by distribution_id
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E1RP5A2MJ8073O
+ comment: modified by ansible cloudfront.py
+
+- name: update a distribution comment by caller_reference
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: my cloudfront distribution 001
+ comment: modified by ansible cloudfront.py
+
+- name: update a distribution's aliases and comment using the distribution_id as a reference
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E1RP5A2MJ8073O
+ comment: modified by cloudfront.py again
+ aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
+
+- name: update a distribution's aliases and comment using an alias as a reference
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: my test distribution
+ comment: modified by cloudfront.py again
+ aliases:
+ - www.my-distribution-source.com
+ - zzz.aaa.io
+
+- name: update a distribution's comment and aliases and tags and remove existing tags
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E15BU8SDCGSG57
+ comment: modified by cloudfront.py again
+ aliases:
+ - tested.com
+ tags:
+ Project: distribution 1.2
+ purge_tags: yes
+
+- name: create a distribution with an origin, logging and default cache behavior
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: unique test distribution ID
+ origins:
+ - id: 'my test origin-000111'
+ domain_name: www.example.com
+ origin_path: /production
+ custom_headers:
+ - header_name: MyCustomHeaderName
+ header_value: MyCustomHeaderValue
+ default_cache_behavior:
+ target_origin_id: 'my test origin-000111'
+ forwarded_values:
+ query_string: true
+ cookies:
+ forward: all
+ headers:
+ - '*'
+ viewer_protocol_policy: allow-all
+ smooth_streaming: true
+ compress: true
+ allowed_methods:
+ items:
+ - GET
+ - HEAD
+ cached_methods:
+ - GET
+ - HEAD
+ logging:
+ enabled: true
+ include_cookies: false
+ bucket: mylogbucket.s3.amazonaws.com
+ prefix: myprefix/
+ enabled: false
+ comment: this is a CloudFront distribution with logging
+
+- name: delete a distribution
+ community.aws.cloudfront_distribution:
+ state: absent
+ caller_reference: replaceable distribution
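+
+# A sketch (the certificate ARN and country codes are placeholders): serve the
+# distribution with an ACM certificate over SNI and apply a geographic whitelist.
+- name: create a distribution with an ACM certificate and geo restrictions
+  community.aws.cloudfront_distribution:
+    state: present
+    default_origin_domain_name: www.my-cloudfront-origin.com
+    viewer_certificate:
+      acm_certificate_arn: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
+      ssl_support_method: sni-only
+    restrictions:
+      geo_restriction:
+        restriction_type: whitelist
+        items:
+          - US
+          - CA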
+'''
+
+RETURN = r'''
+active_trusted_signers:
+ description: Key pair IDs that CloudFront is aware of for each trusted signer.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are in use.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Number of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ items:
+      description: List of key pair IDs for each trusted signer.
+ returned: when there are trusted signers
+ type: list
+ sample:
+ - key_pair_id
+aliases:
+ description: Aliases that refer to the distribution.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of aliases.
+ returned: always
+ type: list
+ sample:
+ - test.example.com
+ quantity:
+ description: Number of aliases.
+ returned: always
+ type: int
+ sample: 1
+arn:
+ description: Amazon Resource Name of the distribution.
+ returned: always
+ type: str
+ sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
+cache_behaviors:
+ description: CloudFront cache behaviors.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cache behaviors.
+ returned: always
+ type: complex
+ contains:
+ allowed_methods:
+ description: Methods allowed by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cached_methods:
+ description: Methods cached by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cached methods.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of cached methods.
+ returned: always
+ type: int
+ sample: 2
+ items:
+ description: List of methods allowed by the cache behavior.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of methods allowed by the cache behavior.
+ returned: always
+ type: int
+ sample: 2
+ compress:
+ description: Whether compression is turned on for the cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ default_ttl:
+ description: Default Time to Live of the cache behavior.
+ returned: always
+ type: int
+ sample: 86400
+ forwarded_values:
+ description: Values forwarded to the origin for this cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cookies:
+ description: Cookies to forward to the origin.
+ returned: always
+ type: complex
+ contains:
+ forward:
+ description: Which cookies to forward to the origin for this cache behavior.
+ returned: always
+ type: str
+ sample: none
+ whitelisted_names:
+ description: The names of the cookies to forward to the origin for this cache behavior.
+ returned: when I(forward=whitelist)
+ type: complex
+ contains:
+ quantity:
+ description: Count of cookies to forward.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of cookies to forward.
+ returned: when list is not empty
+ type: list
+ sample: my_cookie
+ headers:
+ description: Which headers are used to vary on cache retrievals.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers to vary on.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of headers to vary on.
+ returned: when list is not empty
+ type: list
+ sample:
+ - Host
+ query_string:
+ description: Whether the query string is used in cache lookups.
+ returned: always
+ type: bool
+ sample: false
+ query_string_cache_keys:
+ description: Which query string keys to use in cache lookups.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of query string cache keys to use in cache lookups.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of query string cache keys to use in cache lookups.
+ returned: when list is not empty
+ type: list
+ sample:
+ lambda_function_associations:
+ description: Lambda function associations for a cache behavior.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of lambda function associations.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of lambda function associations.
+ returned: when list is not empty
+ type: list
+ sample:
+ - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
+ event_type: viewer-response
+ max_ttl:
+ description: Maximum Time to Live.
+ returned: always
+ type: int
+ sample: 31536000
+ min_ttl:
+ description: Minimum Time to Live.
+ returned: always
+ type: int
+ sample: 0
+ path_pattern:
+ description: Path pattern that determines this cache behavior.
+ returned: always
+ type: str
+ sample: /path/to/files/*
+ smooth_streaming:
+ description: Whether smooth streaming is enabled.
+ returned: always
+ type: bool
+ sample: false
+ target_origin_id:
+          description: ID of the origin referenced by this cache behavior.
+ returned: always
+ type: str
+ sample: origin_abcd
+ trusted_signers:
+ description: Trusted signers.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are enabled for this cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Count of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ viewer_protocol_policy:
+ description: Policy of how to handle http/https.
+ returned: always
+ type: str
+ sample: redirect-to-https
+ quantity:
+ description: Count of cache behaviors.
+ returned: always
+ type: int
+ sample: 1
+
+caller_reference:
+ description: Idempotency reference given when creating CloudFront distribution.
+ returned: always
+ type: str
+ sample: '1484796016700'
+comment:
+ description: Any comments you want to include about the distribution.
+ returned: always
+ type: str
+ sample: 'my first CloudFront distribution'
+custom_error_responses:
+ description: Custom error responses to use for error handling.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of custom error responses.
+ returned: always
+ type: complex
+ contains:
+ error_caching_min_ttl:
+ description: Minimum time to cache this error response.
+ returned: always
+ type: int
+ sample: 300
+ error_code:
+ description: Origin response code that triggers this error response.
+ returned: always
+ type: int
+ sample: 500
+ response_code:
+ description: Response code to return to the requester.
+ returned: always
+ type: str
+ sample: '500'
+ response_page_path:
+ description: Path that contains the error page to display.
+ returned: always
+ type: str
+ sample: /errors/5xx.html
+ quantity:
+ description: Count of custom error response items
+ returned: always
+ type: int
+ sample: 1
+default_cache_behavior:
+ description: Default cache behavior.
+ returned: always
+ type: complex
+ contains:
+ allowed_methods:
+ description: Methods allowed by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cached_methods:
+ description: Methods cached by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cached methods.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of cached methods.
+ returned: always
+ type: int
+ sample: 2
+ items:
+ description: List of methods allowed by the cache behavior.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of methods allowed by the cache behavior.
+ returned: always
+ type: int
+ sample: 2
+ compress:
+ description: Whether compression is turned on for the cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ default_ttl:
+ description: Default Time to Live of the cache behavior.
+ returned: always
+ type: int
+ sample: 86400
+ forwarded_values:
+ description: Values forwarded to the origin for this cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cookies:
+ description: Cookies to forward to the origin.
+ returned: always
+ type: complex
+ contains:
+ forward:
+ description: Which cookies to forward to the origin for this cache behavior.
+ returned: always
+ type: str
+ sample: none
+ whitelisted_names:
+ description: The names of the cookies to forward to the origin for this cache behavior.
+ returned: when I(forward=whitelist)
+ type: complex
+ contains:
+ quantity:
+ description: Count of cookies to forward.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of cookies to forward.
+ returned: when list is not empty
+ type: list
+ sample: my_cookie
+ headers:
+ description: Which headers are used to vary on cache retrievals.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers to vary on.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of headers to vary on.
+ returned: when list is not empty
+ type: list
+ sample:
+ - Host
+ query_string:
+ description: Whether the query string is used in cache lookups.
+ returned: always
+ type: bool
+ sample: false
+ query_string_cache_keys:
+ description: Which query string keys to use in cache lookups.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of query string cache keys to use in cache lookups.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of query string cache keys to use in cache lookups.
+ returned: when list is not empty
+ type: list
+ sample:
+ lambda_function_associations:
+ description: Lambda function associations for a cache behavior.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of lambda function associations.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of lambda function associations.
+ returned: when list is not empty
+ type: list
+ sample:
+ - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
+ event_type: viewer-response
+ max_ttl:
+ description: Maximum Time to Live.
+ returned: always
+ type: int
+ sample: 31536000
+ min_ttl:
+ description: Minimum Time to Live.
+ returned: always
+ type: int
+ sample: 0
+ path_pattern:
+ description: Path pattern that determines this cache behavior.
+ returned: always
+ type: str
+ sample: /path/to/files/*
+ smooth_streaming:
+ description: Whether smooth streaming is enabled.
+ returned: always
+ type: bool
+ sample: false
+ target_origin_id:
+      description: ID of the origin referenced by this cache behavior.
+ returned: always
+ type: str
+ sample: origin_abcd
+ trusted_signers:
+ description: Trusted signers.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are enabled for this cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Count of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ viewer_protocol_policy:
+ description: Policy of how to handle http/https.
+ returned: always
+ type: str
+ sample: redirect-to-https
+default_root_object:
+ description: The object that you want CloudFront to request from your origin (for example, index.html)
+ when a viewer requests the root URL for your distribution.
+ returned: always
+ type: str
+ sample: ''
+diff:
+ description: Difference between previous configuration and new configuration.
+ returned: always
+ type: dict
+ sample: {}
+domain_name:
+ description: Domain name of CloudFront distribution.
+ returned: always
+ type: str
+ sample: d1vz8pzgurxosf.cloudfront.net
+enabled:
+ description: Whether the CloudFront distribution is enabled or not.
+ returned: always
+ type: bool
+ sample: true
+http_version:
+ description: Version of HTTP supported by the distribution.
+ returned: always
+ type: str
+ sample: http2
+id:
+ description: CloudFront distribution ID.
+ returned: always
+ type: str
+ sample: E123456ABCDEFG
+in_progress_invalidation_batches:
+ description: The number of invalidation batches currently in progress.
+ returned: always
+ type: int
+ sample: 0
+is_ipv6_enabled:
+ description: Whether IPv6 is enabled.
+ returned: always
+ type: bool
+ sample: true
+last_modified_time:
+ description: Date and time distribution was last modified.
+ returned: always
+ type: str
+ sample: '2017-10-13T01:51:12.656000+00:00'
+logging:
+ description: Logging information.
+ returned: always
+ type: complex
+ contains:
+ bucket:
+ description: S3 bucket logging destination.
+ returned: always
+ type: str
+ sample: logs-example-com.s3.amazonaws.com
+ enabled:
+ description: Whether logging is enabled.
+ returned: always
+ type: bool
+ sample: true
+ include_cookies:
+ description: Whether to log cookies.
+ returned: always
+ type: bool
+ sample: false
+ prefix:
+ description: Prefix added to logging object names.
+ returned: always
+ type: str
+ sample: cloudfront/test
+origins:
+ description: Origins in the CloudFront distribution.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of origins.
+ returned: always
+ type: complex
+ contains:
+ custom_headers:
+ description: Custom headers passed to the origin.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers.
+ returned: always
+ type: int
+ sample: 1
+ custom_origin_config:
+ description: Configuration of the origin.
+ returned: always
+ type: complex
+ contains:
+ http_port:
+ description: Port on which HTTP is listening.
+ returned: always
+ type: int
+ sample: 80
+ https_port:
+ description: Port on which HTTPS is listening.
+ returned: always
+ type: int
+ sample: 443
+ origin_keepalive_timeout:
+ description: Keep-alive timeout.
+ returned: always
+ type: int
+ sample: 5
+ origin_protocol_policy:
+ description: Policy of which protocols are supported.
+ returned: always
+ type: str
+ sample: https-only
+ origin_read_timeout:
+ description: Timeout for reads to the origin.
+ returned: always
+ type: int
+ sample: 30
+ origin_ssl_protocols:
+ description: SSL protocols allowed by the origin.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of SSL protocols.
+ returned: always
+ type: list
+ sample:
+ - TLSv1
+ - TLSv1.1
+ - TLSv1.2
+ quantity:
+ description: Count of SSL protocols.
+ returned: always
+ type: int
+ sample: 3
+ domain_name:
+ description: Domain name of the origin.
+ returned: always
+ type: str
+ sample: test-origin.example.com
+ id:
+ description: ID of the origin.
+ returned: always
+ type: str
+ sample: test-origin.example.com
+ origin_path:
+ description: Subdirectory to prefix the request from the S3 or HTTP origin.
+ returned: always
+ type: str
+ sample: ''
+ quantity:
+ description: Count of origins.
+ returned: always
+ type: int
+ sample: 1
+price_class:
+ description: Price class of CloudFront distribution.
+ returned: always
+ type: str
+ sample: PriceClass_All
+restrictions:
+ description: Restrictions in use by CloudFront.
+ returned: always
+ type: complex
+ contains:
+ geo_restriction:
+ description: Controls the countries in which your content is distributed.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of restrictions.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of country codes allowed or disallowed.
+ returned: always
+ type: list
+ sample:
+ - xy
+ restriction_type:
+ description: Type of restriction.
+ returned: always
+ type: str
+ sample: blacklist
+status:
+ description: Status of the CloudFront distribution.
+ returned: always
+ type: str
+ sample: InProgress
+tags:
+ description: Distribution tags.
+ returned: always
+ type: dict
+ sample:
+ Hello: World
+viewer_certificate:
+ description: Certificate used by CloudFront distribution.
+ returned: always
+ type: complex
+ contains:
+ acm_certificate_arn:
+ description: ARN of ACM certificate.
+ returned: when certificate comes from ACM
+ type: str
+ sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
+ certificate:
+ description: Reference to certificate.
+ returned: always
+ type: str
+ sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
+ certificate_source:
+ description: Where the certificate comes from.
+ returned: always
+ type: str
+ sample: acm
+ minimum_protocol_version:
+ description: Minimum SSL/TLS protocol supported by this distribution.
+ returned: always
+ type: str
+ sample: TLSv1
+ ssl_support_method:
+ description: Whether pre-SNI browsers are supported.
+ returned: always
+ type: str
+ sample: sni-only
+web_acl_id:
+ description: ID of Web Access Control List (from WAF service).
+ returned: always
+ type: str
+ sample: abcd1234-1234-abcd-abcd-abcd12345678
+'''
+
+import datetime
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def change_dict_key_name(dictionary, old_key, new_key):
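+ """Rename old_key to new_key in dictionary (a no-op when old_key is absent) and return the dictionary."""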
+ if old_key in dictionary:
+ dictionary[new_key] = dictionary.get(old_key)
+ dictionary.pop(old_key, None)
+ return dictionary
+
+
+def merge_validation_into_config(config, validated_node, node_name):
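+ """Merge a validated dict or list into config[node_name]; validated dict keys win on conflict, list entries are de-duplicated."""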
+ if validated_node is not None:
+ if isinstance(validated_node, dict):
+ config_node = config.get(node_name)
+ if config_node is not None:
+ config_node_items = list(config_node.items())
+ else:
+ config_node_items = []
+ config[node_name] = dict(config_node_items + list(validated_node.items()))
+ if isinstance(validated_node, list):
+ config[node_name] = list(set(config.get(node_name) + validated_node))
+ return config
+
+
+def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
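+ """Convert a plain list to CloudFront's list shape, e.g. ['GET', 'HEAD'] -> {'quantity': 2, 'items': ['GET', 'HEAD']}; 'items' is omitted for an empty list."""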
+ if list_items is None:
+ list_items = []
+ if not isinstance(list_items, list):
+ raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
+ result = {}
+ if include_quantity:
+ result['quantity'] = len(list_items)
+ if len(list_items) > 0:
+ result['items'] = list_items
+ return result
+
+
+def create_distribution(client, module, config, tags):
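+ """Create the distribution, via create_distribution_with_tags when tags are supplied, and return the resulting Distribution dict."""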
+ try:
+ if not tags:
+ return client.create_distribution(DistributionConfig=config)['Distribution']
+ else:
+ distribution_config_with_tags = {
+ 'DistributionConfig': config,
+ 'Tags': {
+ 'Items': tags
+ }
+ }
+ return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error creating distribution")
+
+
+def delete_distribution(client, module, distribution):
+ try:
+ return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
+
+
+def update_distribution(client, module, config, distribution_id, e_tag):
+ try:
+ return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
+
+
+def tag_resource(client, module, arn, tags):
+ try:
+ return client.tag_resource(Resource=arn, Tags=dict(Items=tags))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error tagging resource")
+
+
+def untag_resource(client, module, arn, tag_keys):
+ try:
+ return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error untagging resource")
+
+
+def list_tags_for_resource(client, module, arn):
+ try:
+ response = client.list_tags_for_resource(Resource=arn)
+ return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error listing tags for resource")
+
+
+def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
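+ """Reconcile tags on the distribution ARN (remove then add, as computed by compare_aws_tags) and return whether anything changed."""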
+ changed = False
+ to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
+ if to_remove:
+ untag_resource(client, module, arn, to_remove)
+ changed = True
+ if to_add:
+ tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
+ changed = True
+ return changed
+
+
+class CloudFrontValidationManager(object):
+ """
+ Manages CloudFront validations
+ """
+
+ def __init__(self, module):
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+ self.module = module
+ self.__default_distribution_enabled = True
+ self.__default_http_port = 80
+ self.__default_https_port = 443
+ self.__default_ipv6_enabled = False
+ self.__default_origin_ssl_protocols = [
+ 'TLSv1',
+ 'TLSv1.1',
+ 'TLSv1.2'
+ ]
+ self.__default_custom_origin_protocol_policy = 'match-viewer'
+ self.__default_custom_origin_read_timeout = 30
+ self.__default_custom_origin_keepalive_timeout = 5
+ self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ self.__default_cache_behavior_min_ttl = 0
+ self.__default_cache_behavior_max_ttl = 31536000
+ self.__default_cache_behavior_default_ttl = 86400
+ self.__default_cache_behavior_compress = False
+ self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
+ self.__default_cache_behavior_smooth_streaming = False
+ self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
+ self.__default_cache_behavior_forwarded_values_query_string = True
+ self.__default_trusted_signers_enabled = False
+ self.__valid_price_classes = set([
+ 'PriceClass_100',
+ 'PriceClass_200',
+ 'PriceClass_All'
+ ])
+ self.__valid_origin_protocol_policies = set([
+ 'http-only',
+ 'match-viewer',
+ 'https-only'
+ ])
+ self.__valid_origin_ssl_protocols = set([
+ 'SSLv3',
+ 'TLSv1',
+ 'TLSv1.1',
+ 'TLSv1.2'
+ ])
+ self.__valid_cookie_forwarding = set([
+ 'none',
+ 'whitelist',
+ 'all'
+ ])
+ self.__valid_viewer_protocol_policies = set([
+ 'allow-all',
+ 'https-only',
+ 'redirect-to-https'
+ ])
+ self.__valid_methods = set([
+ 'GET',
+ 'HEAD',
+ 'POST',
+ 'PUT',
+ 'PATCH',
+ 'OPTIONS',
+ 'DELETE'
+ ])
+ self.__valid_methods_cached_methods = [
+ set([
+ 'GET',
+ 'HEAD'
+ ]),
+ set([
+ 'GET',
+ 'HEAD',
+ 'OPTIONS'
+ ])
+ ]
+ self.__valid_methods_allowed_methods = [
+ self.__valid_methods_cached_methods[0],
+ self.__valid_methods_cached_methods[1],
+ self.__valid_methods
+ ]
+ self.__valid_lambda_function_association_event_types = set([
+ 'viewer-request',
+ 'viewer-response',
+ 'origin-request',
+ 'origin-response'
+ ])
+ self.__valid_viewer_certificate_ssl_support_methods = set([
+ 'sni-only',
+ 'vip'
+ ])
+ self.__valid_viewer_certificate_minimum_protocol_versions = set([
+ 'SSLv3',
+ 'TLSv1',
+ 'TLSv1_2016',
+ 'TLSv1.1_2016',
+ 'TLSv1.2_2018',
+ 'TLSv1.2_2019'
+ ])
+ self.__valid_viewer_certificate_certificate_sources = set([
+ 'cloudfront',
+ 'iam',
+ 'acm'
+ ])
+ self.__valid_http_versions = set([
+ 'http1.1',
+ 'http2'
+ ])
+ self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
+
+ def add_missing_key(self, dict_object, key_to_set, value_to_set):
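+ """Set key_to_set on dict_object only when the key is absent and value_to_set is not None."""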
+ if key_to_set not in dict_object and value_to_set is not None:
+ dict_object[key_to_set] = value_to_set
+ return dict_object
+
+ def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
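+ """Set new_key to value_to_set when old_key is absent and a default is provided; otherwise rename old_key to new_key (e.g. max_ttl -> max_t_t_l)."""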
+ if old_key not in dict_object and value_to_set is not None:
+ dict_object[new_key] = value_to_set
+ else:
+ dict_object = change_dict_key_name(dict_object, old_key, new_key)
+ return dict_object
+
+ def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
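+ """If key_name already exists, validate value_to_set against valid_values; otherwise store it, optionally wrapped as a CloudFront-style list."""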
+ if key_name in dict_object:
+ self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
+ else:
+ if to_aws_list:
+ dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
+ elif value_to_set is not None:
+ dict_object[key_name] = value_to_set
+ return dict_object
+
+ def validate_logging(self, logging):
+ try:
+ if logging is None:
+ return None
+ valid_logging = {}
+ if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
+ self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
+ valid_logging['include_cookies'] = logging.get('include_cookies')
+ valid_logging['enabled'] = logging.get('enabled')
+ valid_logging['bucket'] = logging.get('bucket')
+ valid_logging['prefix'] = logging.get('prefix')
+ return valid_logging
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution logging")
+
+ def validate_is_list(self, list_to_validate, list_name):
+ if not isinstance(list_to_validate, list):
+ self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
+
+ def validate_required_key(self, key_name, full_key_name, dict_object):
+ if key_name not in dict_object:
+ self.module.fail_json(msg="%s must be specified." % full_key_name)
+
+ def validate_origins(self, client, config, origins, default_origin_domain_name,
+ default_origin_path, create_distribution, purge_origins=False):
+ try:
+ if origins is None:
+ if default_origin_domain_name is None and not create_distribution:
+ if purge_origins:
+ return None
+ else:
+ return ansible_list_to_cloudfront_list(config)
+ if default_origin_domain_name is not None:
+ origins = [{
+ 'domain_name': default_origin_domain_name,
+ 'origin_path': default_origin_path or ''
+ }]
+ else:
+ origins = []
+ self.validate_is_list(origins, 'origins')
+ if not origins and default_origin_domain_name is None and create_distribution:
+ self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
+ all_origins = OrderedDict()
+ new_domains = list()
+ for origin in config:
+ all_origins[origin.get('domain_name')] = origin
+ for origin in origins:
+ origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
+ all_origins[origin['domain_name']] = origin
+ new_domains.append(origin['domain_name'])
+ if purge_origins:
+ for domain in list(all_origins.keys()):
+ if domain not in new_domains:
+ del all_origins[domain]
+ return ansible_list_to_cloudfront_list(list(all_origins.values()))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution origins")
+
+ def validate_s3_origin_configuration(self, client, existing_config, origin):
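+ """Resolve the origin access identity for an S3 origin: prefer the play-supplied value, then the existing one, else create a new OAI via the CloudFront API."""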
+ if not origin['s3_origin_access_identity_enabled']:
+ return None
+
+ if origin.get('s3_origin_config', {}).get('origin_access_identity'):
+ return origin['s3_origin_config']['origin_access_identity']
+
+ if existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
+ return existing_config['s3_origin_config']['origin_access_identity']
+
+ try:
+ comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+ caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+ cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
+ Comment=comment))
+ oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
+ return "origin-access-identity/cloudfront/%s" % oai
+
+ def validate_origin(self, client, existing_config, origin, default_origin_path):
+ try:
+ origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
+ self.validate_required_key('origin_path', 'origins[].origin_path', origin)
+ origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
+ if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
+ for custom_header in origin.get('custom_headers'):
+ if 'header_name' not in custom_header or 'header_value' not in custom_header:
+ self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
+ origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
+ else:
+ origin['custom_headers'] = ansible_list_to_cloudfront_list()
+ if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
+ if origin.get("s3_origin_access_identity_enabled") is not None:
+ s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
+ if s3_origin_config:
+ oai = s3_origin_config
+ else:
+ oai = ""
+ origin["s3_origin_config"] = dict(origin_access_identity=oai)
+ del(origin["s3_origin_access_identity_enabled"])
+ if 'custom_origin_config' in origin:
+ self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
+ else:
+ origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
+ custom_origin_config = origin.get('custom_origin_config')
+ custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
+ 'origins[].custom_origin_config.origin_protocol_policy',
+ self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
+ custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
+ custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
+ custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
+ custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
+ if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
+ custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
+ if custom_origin_config.get('origin_ssl_protocols'):
+ self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
+ self.__valid_origin_ssl_protocols)
+ else:
+ custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
+ custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
+ return origin
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution origin")
+
+ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
+ try:
+ if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
+ return ansible_list_to_cloudfront_list(config)
+ all_cache_behaviors = OrderedDict()
+ # cache behaviors are order dependent, so the existing ordering is only preserved when purge_cache_behaviors
+ # is false (when purging, the ordering given in the play is taken as authoritative)
+ if not purge_cache_behaviors:
+ for behavior in config:
+ all_cache_behaviors[behavior['path_pattern']] = behavior
+ for cache_behavior in cache_behaviors:
+ valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
+ cache_behavior, valid_origins)
+ all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
+ if purge_cache_behaviors:
+ for existing_pattern in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
+ del all_cache_behaviors[existing_pattern]
+ return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
+
+ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
+ if is_default_cache and cache_behavior is None:
+ cache_behavior = {}
+ if cache_behavior is None and valid_origins is not None:
+ return config
+ cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
+ cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
+ cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
+ cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
+ cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
+ cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior)
+ return cache_behavior
+
+ def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
+ try:
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
+ config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
+ config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
+ config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
+ cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
+ target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
+ if not target_origin_id:
+ target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
+ if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
+ if is_default_cache:
+ cache_behavior_name = 'Default cache behavior'
+ else:
+ cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
+ self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
+ cache_behavior_name)
+ cache_behavior['target_origin_id'] = target_origin_id
+ cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
+ config.get('viewer_protocol_policy',
+ self.__default_cache_behavior_viewer_protocol_policy),
+ self.__valid_viewer_protocol_policies)
+ cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
+ config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
+
+ def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
+ try:
+ if not forwarded_values:
+ forwarded_values = dict()
+ existing_config = config.get('forwarded_values', {})
+ headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
+ if headers:
+ headers.sort()
+ forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
+ if 'cookies' not in forwarded_values:
+ forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
+ forwarded_values['cookies'] = {'forward': forward}
+ else:
+ existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
+ whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
+ if whitelist:
+ self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
+ forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
+ cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
+ self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
+ self.__valid_cookie_forwarding)
+ forwarded_values['cookies']['forward'] = cookie_forwarding
+ query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
+ self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
+ forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
+ forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
+ existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
+ cache_behavior['forwarded_values'] = forwarded_values
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating forwarded values")
+
+ def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
+ try:
+ if lambda_function_associations is not None:
+ self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
+ for association in lambda_function_associations:
+ association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
+ self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
+ self.__valid_lambda_function_association_event_types)
+ cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
+ else:
+ if 'lambda_function_associations' in config:
+ cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
+ else:
+ cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating lambda function associations")
+
+ def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
+ if field_level_encryption_id is not None:
+ cache_behavior['field_level_encryption_id'] = field_level_encryption_id
+ elif 'field_level_encryption_id' in config:
+ cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
+ else:
+ cache_behavior['field_level_encryption_id'] = ""
+ return cache_behavior
+
+ def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
+ try:
+ if allowed_methods is not None:
+ self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
+ temp_allowed_items = allowed_methods.get('items')
+ self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
+ self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
+ self.__valid_methods_allowed_methods)
+ cached_items = allowed_methods.get('cached_methods')
+ if 'cached_methods' in allowed_methods:
+ self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
+ self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_methods.cached_methods[]',
+ self.__valid_methods_cached_methods)
+ # we don't care if the order of how cloudfront stores the methods differs - preserving existing
+ # order reduces likelihood of making unnecessary changes
+ if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
+ cache_behavior['allowed_methods'] = config['allowed_methods']
+ else:
+ cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
+
+ if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
+ cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
+ else:
+ cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
+ else:
+ if 'allowed_methods' in config:
+ cache_behavior['allowed_methods'] = config.get('allowed_methods')
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating allowed methods")
+
+ def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
+ try:
+ if trusted_signers is None:
+ trusted_signers = {}
+ # the existing trusted signers live under config['trusted_signers'], not at the top level of the cache behavior
+ existing = config.get('trusted_signers', {})
+ if 'items' in trusted_signers:
+ valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
+ elif 'items' in existing:
+ valid_trusted_signers = ansible_list_to_cloudfront_list(existing['items'])
+ else:
+ valid_trusted_signers = dict(quantity=existing.get('quantity', 0))
+ valid_trusted_signers['enabled'] = trusted_signers.get('enabled', existing.get('enabled', self.__default_trusted_signers_enabled))
+ cache_behavior['trusted_signers'] = valid_trusted_signers
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating trusted signers")
+
+ def validate_viewer_certificate(self, viewer_certificate):
+ try:
+ if viewer_certificate is None:
+ return None
+ if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
+ self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
+ "_certificate set to true.")
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
+ self.__valid_viewer_certificate_ssl_support_methods)
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
+ self.__valid_viewer_certificate_minimum_protocol_versions)
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
+ self.__valid_viewer_certificate_certificate_sources)
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
+ return viewer_certificate
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating viewer certificate")
+
+ def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
+ try:
+ if custom_error_responses is None and not purge_custom_error_responses:
+ return ansible_list_to_cloudfront_list(config)
+ self.validate_is_list(custom_error_responses, 'custom_error_responses')
+ result = list()
+ existing_responses = dict((response['error_code'], response) for response in config)
+ for custom_error_response in custom_error_responses:
+ self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
+ custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
+ if 'response_code' in custom_error_response:
+ custom_error_response['response_code'] = str(custom_error_response['response_code'])
+ if custom_error_response['error_code'] in existing_responses:
+ del existing_responses[custom_error_response['error_code']]
+ result.append(custom_error_response)
+ if not purge_custom_error_responses:
+ result.extend(existing_responses.values())
+
+ return ansible_list_to_cloudfront_list(result)
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating custom error responses")
+
+ def validate_restrictions(self, config, restrictions, purge_restrictions=False):
+ try:
+ if restrictions is None:
+ if purge_restrictions:
+ return None
+ else:
+ return config
+ self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
+ geo_restriction = restrictions.get('geo_restriction')
+ self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
+ # the existing config stores items directly under geo_restriction; default to [] so a
+ # restriction_type of 'none' (which carries no items) does not raise
+ existing_restrictions = config.get('geo_restriction', {}).get('items', [])
+ geo_restriction_items = geo_restriction.get('items') or []
+ if not purge_restrictions:
+ geo_restriction_items.extend([rest for rest in existing_restrictions if
+ rest not in geo_restriction_items])
+ valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
+ valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
+ return {'geo_restriction': valid_restrictions}
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating restrictions")
+
+ def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
+ try:
+ config['default_root_object'] = default_root_object or config.get('default_root_object', '')
+ config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled)
+ if http_version is not None or config.get('http_version'):
+ self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
+ config['http_version'] = http_version or config.get('http_version')
+ if web_acl_id or config.get('web_a_c_l_id'):
+ config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
+ return config
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
+
+ def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
+ try:
+ if config is None:
+ config = {}
+ if aliases is not None:
+ if not purge_aliases:
+ aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
+ if alias not in aliases])
+ config['aliases'] = ansible_list_to_cloudfront_list(aliases)
+ if logging is not None:
+ config['logging'] = self.validate_logging(logging)
+ config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
+ if price_class is not None:
+ self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
+ config['price_class'] = price_class
+ return config
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
+
+ def validate_comment(self, config, comment):
+ config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
+ return config
+
+ def validate_caller_reference(self, caller_reference):
+ return caller_reference or self.__default_datetime_string
+
+ def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
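+ """Return the id of the first valid origin, for use as the default cache behavior's target_origin_id, failing when no origins exist."""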
+ try:
+ if valid_origins is not None:
+ valid_origins_list = valid_origins.get('items')
+ if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
+ return str(valid_origins_list[0].get('id'))
+ self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
+
+ def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
+ try:
+ self.validate_is_list(attribute_list, attribute_list_name)
+ if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
+ isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
+ self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
+
+ def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
+ if attribute is not None and attribute not in allowed_list:
+ self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
+
+ def validate_distribution_from_caller_reference(self, caller_reference):
+ try:
+ distributions = self.__cloudfront_facts_mgr.list_distributions(False)
+ distribution_name = 'Distribution'
+ distribution_config_name = 'DistributionConfig'
+ distribution_ids = [dist.get('Id') for dist in distributions]
+ for distribution_id in distribution_ids:
+ distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ if distribution is not None:
+ distribution_config = distribution[distribution_name].get(distribution_config_name)
+ if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
+ distribution[distribution_name][distribution_config_name] = distribution_config
+ return distribution
+
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
+
+ def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
+ try:
+ if caller_reference is not None:
+ return self.validate_distribution_from_caller_reference(caller_reference)
+ else:
+ if aliases:
+ distribution_id = self.validate_distribution_id_from_alias(aliases)
+ if distribution_id:
+ return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ return None
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
+
+ def validate_distribution_id_from_alias(self, aliases):
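+ """Return the Id of the first distribution whose aliases intersect the given aliases, or None when there is no match."""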
+ distributions = self.__cloudfront_facts_mgr.list_distributions(False)
+ if distributions:
+ for distribution in distributions:
+ distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
+ if set(aliases) & set(distribution_aliases):
+ return distribution['Id']
+ return None
+
+ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
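+ """Block until the distribution leaves the InProgress state, polling the distribution_deployed waiter roughly once per minute until wait_timeout elapses."""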
+ if distribution_id is None:
+ distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Distribution']['Id']
+
+ try:
+ waiter = client.get_waiter('distribution_deployed')
+ attempts = 1 + int(wait_timeout / 60)
+ waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action."
+ " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)))
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ caller_reference=dict(),
+ comment=dict(),
+ distribution_id=dict(),
+ e_tag=dict(),
+ tags=dict(type='dict', default={}),
+ purge_tags=dict(type='bool', default=False),
+ alias=dict(),
+ aliases=dict(type='list', default=[], elements='str'),
+ purge_aliases=dict(type='bool', default=False),
+ default_root_object=dict(),
+ origins=dict(type='list', elements='dict'),
+ purge_origins=dict(type='bool', default=False),
+ default_cache_behavior=dict(type='dict'),
+ cache_behaviors=dict(type='list', elements='dict'),
+ purge_cache_behaviors=dict(type='bool', default=False),
+ custom_error_responses=dict(type='list', elements='dict'),
+ purge_custom_error_responses=dict(type='bool', default=False),
+ logging=dict(type='dict'),
+ price_class=dict(),
+ enabled=dict(type='bool'),
+ viewer_certificate=dict(type='dict'),
+ restrictions=dict(type='dict'),
+ purge_restrictions=dict(type='bool', default=False),
+ web_acl_id=dict(),
+ http_version=dict(),
+ ipv6_enabled=dict(type='bool'),
+ default_origin_domain_name=dict(),
+ default_origin_path=dict(),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1800, type='int')
+ )
+
+ result = {}
+ changed = True
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ mutually_exclusive=[
+ ['distribution_id', 'alias'],
+ ['default_origin_domain_name', 'distribution_id'],
+ ['default_origin_domain_name', 'alias'],
+ ]
+ )
+
+ client = module.client('cloudfront')
+
+ validation_mgr = CloudFrontValidationManager(module)
+
+ state = module.params.get('state')
+ caller_reference = module.params.get('caller_reference')
+ comment = module.params.get('comment')
+ e_tag = module.params.get('e_tag')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ distribution_id = module.params.get('distribution_id')
+ alias = module.params.get('alias')
+ aliases = module.params.get('aliases')
+ purge_aliases = module.params.get('purge_aliases')
+ default_root_object = module.params.get('default_root_object')
+ origins = module.params.get('origins')
+ purge_origins = module.params.get('purge_origins')
+ default_cache_behavior = module.params.get('default_cache_behavior')
+ cache_behaviors = module.params.get('cache_behaviors')
+ purge_cache_behaviors = module.params.get('purge_cache_behaviors')
+ custom_error_responses = module.params.get('custom_error_responses')
+ purge_custom_error_responses = module.params.get('purge_custom_error_responses')
+ logging = module.params.get('logging')
+ price_class = module.params.get('price_class')
+ enabled = module.params.get('enabled')
+ viewer_certificate = module.params.get('viewer_certificate')
+ restrictions = module.params.get('restrictions')
+ purge_restrictions = module.params.get('purge_restrictions')
+ web_acl_id = module.params.get('web_acl_id')
+ http_version = module.params.get('http_version')
+ ipv6_enabled = module.params.get('ipv6_enabled')
+ default_origin_domain_name = module.params.get('default_origin_domain_name')
+ default_origin_path = module.params.get('default_origin_path')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if alias and alias not in aliases:
+ aliases.append(alias)
+
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+
+ update = state == 'present' and distribution
+ create = state == 'present' and not distribution
+ delete = state == 'absent' and distribution
+
+ if not (update or create or delete):
+ module.exit_json(changed=False)
+
+ if update or delete:
+ config = distribution['Distribution']['DistributionConfig']
+ e_tag = distribution['ETag']
+ distribution_id = distribution['Distribution']['Id']
+ else:
+ config = dict()
+ if update:
+ config = camel_dict_to_snake_dict(config, reversible=True)
+
+ if create or update:
+ config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
+ config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
+ config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
+ default_origin_path, create, purge_origins)
+ config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
+ cache_behaviors, config['origins'], purge_cache_behaviors)
+ config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
+ default_cache_behavior, config['origins'], True)
+ config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
+ custom_error_responses, purge_custom_error_responses)
+ valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
+ if valid_restrictions:
+ config['restrictions'] = valid_restrictions
+ valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
+ config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
+ config = validation_mgr.validate_comment(config, comment)
+ config = snake_dict_to_camel_dict(config, capitalize_first=True)
+
+ if create:
+ config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
+ result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
+ result = camel_dict_to_snake_dict(result)
+ result['tags'] = list_tags_for_resource(client, module, result['arn'])
+
+ if delete:
+ if config['Enabled']:
+ config['Enabled'] = False
+ result = update_distribution(client, module, config, distribution_id, e_tag)
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+ # e_tag = distribution['ETag']
+ result = delete_distribution(client, module, distribution)
+
+ if update:
+ changed = config != distribution['Distribution']['DistributionConfig']
+ if changed:
+ result = update_distribution(client, module, config, distribution_id, e_tag)
+ else:
+ result = distribution['Distribution']
+ existing_tags = list_tags_for_resource(client, module, result['ARN'])
+ distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
+ changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
+ result = camel_dict_to_snake_dict(result)
+ result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
+ result['diff'] = dict()
+ diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
+ if diff:
+ result['diff']['before'] = diff[0]
+ result['diff']['after'] = diff[1]
+
+ if wait and (create or update):
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+
+ if 'distribution_config' in result:
+ result.update(result['distribution_config'])
+ del result['distribution_config']
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_facts.py
new file mode 100644
index 00000000..293cd2f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_facts.py
@@ -0,0 +1,715 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudfront_info
+version_added: 1.0.0
+short_description: Obtain facts about an AWS CloudFront distribution
+description:
+ - Gets information about an AWS CloudFront distribution.
+ - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.cloudfront_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+author: Willem van Ketwich (@wilvk)
+options:
+ distribution_id:
+ description:
+ - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
+ I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
+ required: false
+ type: str
+ invalidation_id:
+ description:
+ - The id of the invalidation to get information about.
+ - Used with I(invalidation).
+ required: false
+ type: str
+ origin_access_identity_id:
+ description:
+ - The id of the CloudFront origin access identity to get information about.
+ required: false
+ type: str
+# web_acl_id:
+# description:
+# - Used with I(list_distributions_by_web_acl_id).
+# required: false
+# type: str
+ domain_name_alias:
+ description:
+ - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
+ distribution to get the distribution id where required.
+ required: false
+ type: str
+ all_lists:
+ description:
+ - Get all CloudFront lists that do not require parameters.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity:
+ description:
+ - Get information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity_config:
+ description:
+ - Get the configuration information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution:
+ description:
+ - Get information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution_config:
+ description:
+ - Get the configuration information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ invalidation:
+ description:
+ - Get information about an invalidation.
+ - Requires I(invalidation_id) and either I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution:
+ description:
+ - Get information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution_config:
+ description:
+ - Get the configuration information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_origin_access_identities:
+ description:
+ - Get a list of CloudFront origin access identities.
+ required: false
+ default: false
+ type: bool
+ list_distributions:
+ description:
+ - Get a list of CloudFront distributions.
+ required: false
+ default: false
+ type: bool
+ list_distributions_by_web_acl_id:
+ description:
+ - Get a list of distributions using web acl id as a filter.
+ - Requires I(web_acl_id) to be set.
+ required: false
+ default: false
+ type: bool
+ list_invalidations:
+ description:
+ - Get a list of invalidations.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_streaming_distributions:
+ description:
+ - Get a list of streaming distributions.
+ required: false
+ default: false
+ type: bool
+ summary:
+ description:
+ - Returns a summary of all distributions, streaming distributions and origin access identities.
+ - This is the default behaviour if no option is selected.
+ required: false
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get a summary of distributions
+ community.aws.cloudfront_info:
+ summary: true
+ register: result
+
+- name: Get information about a distribution
+ community.aws.cloudfront_info:
+ distribution: true
+ distribution_id: my-cloudfront-distribution-id
+ register: result_did
+- ansible.builtin.debug:
+ msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- name: Get information about a distribution using the CNAME of the cloudfront distribution.
+ community.aws.cloudfront_info:
+ distribution: true
+ domain_name_alias: www.my-website.com
+ register: result_website
+- ansible.builtin.debug:
+ msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
+
+# When the module is called as cloudfront_facts, return values are published
+# in ansible_facts['cloudfront'][<id>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+- name: Gather facts
+ community.aws.cloudfront_facts:
+ distribution: true
+ distribution_id: my-cloudfront-distribution-id
+- ansible.builtin.debug:
+ msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- community.aws.cloudfront_facts:
+ distribution: true
+ domain_name_alias: www.my-website.com
+- ansible.builtin.debug:
+ msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
+
+- name: Get all information about an invalidation for a distribution.
+ community.aws.cloudfront_info:
+ invalidation: true
+ distribution_id: my-cloudfront-distribution-id
+ invalidation_id: my-cloudfront-invalidation-id
+
+- name: Get all information about a CloudFront origin access identity.
+ community.aws.cloudfront_info:
+ origin_access_identity: true
+ origin_access_identity_id: my-cloudfront-origin-access-identity-id
+
+- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
+ community.aws.cloudfront_info:
+ all_lists: true
+'''
+
+RETURN = '''
+origin_access_identity:
+ description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity) is true
+ type: dict
+origin_access_identity_configuration:
+ description: Describes the configuration information of the origin access identity. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity_config) is true
+ type: dict
+distribution:
+ description: >
+ Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution) is true
+ type: dict
+distribution_config:
+ description: >
+ Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution_config) is true
+ type: dict
+invalidation:
+ description: >
+ Describes the invalidation information for the distribution. Requires
+ I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias).
+ returned: only if I(invalidation) is true
+ type: dict
+streaming_distribution:
+ description: >
+ Describes the streaming information for the distribution. Requires
+ I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution) is true
+ type: dict
+streaming_distribution_config:
+ description: >
+ Describes the streaming configuration information for the distribution.
+ Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution_config) is true
+ type: dict
+summary:
+ description: Gives a summary of distributions, streaming distributions and origin access identities.
+ returned: as default or if I(summary) is true
+ type: dict
+result:
+ description: >
+ Result dict not nested under the CloudFront ID, so the module's results can be accessed
+ without knowing that ID; figuring out the DistributionId is usually the reason one uses
+ this module in the first place.
+ returned: always
+ type: dict
+'''
+
+from functools import partial
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class CloudFrontServiceManager:
+ """Handles CloudFront Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.client = module.client('cloudfront')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def get_distribution(self, distribution_id):
+ try:
+ func = partial(self.client.get_distribution, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_distribution_config(self, distribution_id):
+ try:
+ func = partial(self.client.get_distribution_config, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_origin_access_identity(self, origin_access_identity_id):
+ try:
+ func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing origin access identity - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_origin_access_identity_config(self, origin_access_identity_id):
+ try:
+ func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_invalidation(self, distribution_id, invalidation_id):
+ try:
+ func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing invalidation - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_streaming_distribution(self, distribution_id):
+ try:
+ func = partial(self.client.get_streaming_distribution, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_streaming_distribution_config(self, distribution_id):
+ try:
+ func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_origin_access_identities(self):
+ try:
+ func = partial(self.client.list_cloud_front_origin_access_identities)
+ origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList')
+ if origin_access_identity_list['Quantity'] > 0:
+ return origin_access_identity_list['Items']
+ return {}
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_distributions(self, keyed=True):
+ try:
+ func = partial(self.client.list_distributions)
+ distribution_list = self.paginated_response(func, 'DistributionList')
+ if distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ distribution_list = distribution_list['Items']
+ if not keyed:
+ return distribution_list
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_distributions_by_web_acl_id(self, web_acl_id):
+ try:
+ func = partial(self.client.list_distributions_by_web_acl_id, WebAclId=web_acl_id)
+ distribution_list = self.paginated_response(func, 'DistributionList')
+ if distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ distribution_list = distribution_list['Items']
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_invalidations(self, distribution_id):
+ try:
+ func = partial(self.client.list_invalidations, DistributionId=distribution_id)
+ invalidation_list = self.paginated_response(func, 'InvalidationList')
+ if invalidation_list['Quantity'] > 0:
+ return invalidation_list['Items']
+ return {}
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing invalidations - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_streaming_distributions(self, keyed=True):
+ try:
+ func = partial(self.client.list_streaming_distributions)
+ streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList')
+ if streaming_distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ streaming_distribution_list = streaming_distribution_list['Items']
+ if not keyed:
+ return streaming_distribution_list
+ return self.keyed_list_helper(streaming_distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def summary(self):
+ summary_dict = {}
+ summary_dict.update(self.summary_get_distribution_list(False))
+ summary_dict.update(self.summary_get_distribution_list(True))
+ summary_dict.update(self.summary_get_origin_access_identity_list())
+ return summary_dict
+
+ def summary_get_origin_access_identity_list(self):
+ try:
+ origin_access_identity_list = {'origin_access_identities': []}
+ origin_access_identities = self.list_origin_access_identities()
+ for origin_access_identity in origin_access_identities:
+ oai_id = origin_access_identity['Id']
+ oai_full_response = self.get_origin_access_identity(oai_id)
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
+ return origin_access_identity_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def summary_get_distribution_list(self, streaming=False):
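+ # Collect a trimmed-down record per (streaming) distribution: the key
+ # attributes listed below plus aliases, ETag and tags and, for web
+ # distributions, the WebACLId and any invalidation ids.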
+ try:
+ list_name = 'streaming_distributions' if streaming else 'distributions'
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
+ distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ for dist in distributions:
+ temp_distribution = {}
+ for key_name in key_list:
+ temp_distribution[key_name] = dist[key_name]
+ temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
+ temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ if not streaming:
+ temp_distribution['WebACLId'] = dist['WebACLId']
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ if invalidation_ids:
+ temp_distribution['Invalidations'] = invalidation_ids
+ resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
+ temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ distribution_list[list_name].append(temp_distribution)
+ return distribution_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except Exception as e:
+ self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
+ exception=traceback.format_exc())
+
+ def get_etag_from_distribution_id(self, distribution_id, streaming):
+ distribution = {}
+ if not streaming:
+ distribution = self.get_distribution(distribution_id)
+ else:
+ distribution = self.get_streaming_distribution(distribution_id)
+ return distribution['ETag']
+
+ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
+ try:
+ invalidation_ids = []
+ invalidations = self.list_invalidations(distribution_id)
+ for invalidation in invalidations:
+ invalidation_ids.append(invalidation['Id'])
+ return invalidation_ids
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_distribution_id_from_domain_name(self, domain_name):
+ try:
+ distribution_id = ""
+ distributions = self.list_distributions(False)
+ distributions += self.list_streaming_distributions(False)
+ for dist in distributions:
+ if 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ if str(alias).lower() == domain_name.lower():
+ distribution_id = dist['Id']
+ break
+ return distribution_id
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_aliases_from_distribution_id(self, distribution_id):
+ aliases = []
+ try:
+ distributions = self.list_distributions(False)
+ for dist in distributions:
+ if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ aliases.append(alias)
+ break
+ return aliases
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def paginated_response(self, func, result_key=""):
+ '''
+ Returns expanded response for paginated operations.
+ The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+ '''
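+ # Marker-based pagination: re-invoke the partial, passing the previous
+ # page's NextMarker, until a response carries no NextMarker. Each page's
+ # payload (or the sub-dict named by result_key) is merged into 'results'
+ # with dict.update(), so duplicate keys from later pages overwrite
+ # earlier ones rather than being concatenated.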
+ args = dict()
+ results = dict()
+ loop = True
+ while loop:
+ response = func(**args)
+ if result_key == "":
+ result = response
+ result.pop('ResponseMetadata', None)
+ else:
+ result = response.get(result_key)
+ results.update(result)
+ args['Marker'] = response.get('NextMarker')
+ for key in response.keys():
+ if key.endswith('List'):
+ args['Marker'] = response[key].get('NextMarker')
+ break
+ loop = args['Marker'] is not None
+ return results
+
+ def keyed_list_helper(self, list_to_key):
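+ # Index each item under both its distribution Id and every alias it
+ # carries, so callers can look a distribution up by either key.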
+ keyed_list = dict()
+ for item in list_to_key:
+ distribution_id = item['Id']
+ if 'Items' in item['Aliases']:
+ aliases = item['Aliases']['Items']
+ for alias in aliases:
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
+ return keyed_list
+
+
+def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
+ facts[distribution_id].update(details)
+ # also have a fixed key for accessing results/details returned
+ facts['result'] = details
+ facts['result']['DistributionId'] = distribution_id
+
+ for alias in aliases:
+ facts[alias].update(details)
+ return facts
+
+
+def main():
+ argument_spec = dict(
+ distribution_id=dict(required=False, type='str'),
+ invalidation_id=dict(required=False, type='str'),
+ origin_access_identity_id=dict(required=False, type='str'),
+ domain_name_alias=dict(required=False, type='str'),
+ web_acl_id=dict(required=False, type='str'),
+ all_lists=dict(required=False, default=False, type='bool'),
+ distribution=dict(required=False, default=False, type='bool'),
+ distribution_config=dict(required=False, default=False, type='bool'),
+ origin_access_identity=dict(required=False, default=False, type='bool'),
+ origin_access_identity_config=dict(required=False, default=False, type='bool'),
+ invalidation=dict(required=False, default=False, type='bool'),
+ streaming_distribution=dict(required=False, default=False, type='bool'),
+ streaming_distribution_config=dict(required=False, default=False, type='bool'),
+ list_origin_access_identities=dict(required=False, default=False, type='bool'),
+ list_distributions=dict(required=False, default=False, type='bool'),
+ list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
+ list_invalidations=dict(required=False, default=False, type='bool'),
+ list_streaming_distributions=dict(required=False, default=False, type='bool'),
+ summary=dict(required=False, default=False, type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+ is_old_facts = module._name == 'cloudfront_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ service_mgr = CloudFrontServiceManager(module)
+
+ distribution_id = module.params.get('distribution_id')
+ invalidation_id = module.params.get('invalidation_id')
+ origin_access_identity_id = module.params.get('origin_access_identity_id')
+ web_acl_id = module.params.get('web_acl_id')
+ domain_name_alias = module.params.get('domain_name_alias')
+ all_lists = module.params.get('all_lists')
+ distribution = module.params.get('distribution')
+ distribution_config = module.params.get('distribution_config')
+ origin_access_identity = module.params.get('origin_access_identity')
+ origin_access_identity_config = module.params.get('origin_access_identity_config')
+ invalidation = module.params.get('invalidation')
+ streaming_distribution = module.params.get('streaming_distribution')
+ streaming_distribution_config = module.params.get('streaming_distribution_config')
+ list_origin_access_identities = module.params.get('list_origin_access_identities')
+ list_distributions = module.params.get('list_distributions')
+ list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
+ list_invalidations = module.params.get('list_invalidations')
+ list_streaming_distributions = module.params.get('list_streaming_distributions')
+ summary = module.params.get('summary')
+
+ aliases = []
+ result = {'cloudfront': {}}
+ facts = {}
+
+ require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
+ streaming_distribution_config or list_invalidations)
+
+ # set default to summary if no option specified
+ summary = summary or not (distribution or distribution_config or origin_access_identity or
+ origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
+ list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
+ list_streaming_distributions or list_distributions)
+
+ # validations
+ if require_distribution_id and distribution_id is None and domain_name_alias is None:
+ module.fail_json(msg='Error distribution_id or domain_name_alias has not been specified.')
+ if (invalidation and invalidation_id is None):
+ module.fail_json(msg='Error invalidation_id has not been specified.')
+ if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
+ module.fail_json(msg='Error origin_access_identity_id has not been specified.')
+ if list_distributions_by_web_acl_id and web_acl_id is None:
+ module.fail_json(msg='Error web_acl_id has not been specified.')
+
+ # get distribution id from domain name alias
+ if require_distribution_id and distribution_id is None:
+ distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
+ if not distribution_id:
+ module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')
+
+ # set appropriate cloudfront id
+ if distribution_id and not list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ if invalidation_id:
+ facts.update({invalidation_id: {}})
+ elif distribution_id and list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ elif origin_access_identity_id:
+ facts = {origin_access_identity_id: {}}
+ elif web_acl_id:
+ facts = {web_acl_id: {}}
+
+ # get details based on options
+ if distribution:
+ facts_to_set = service_mgr.get_distribution(distribution_id)
+ if distribution_config:
+ facts_to_set = service_mgr.get_distribution_config(distribution_id)
+ if origin_access_identity:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
+ if origin_access_identity_config:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
+ if invalidation:
+ facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
+ facts[invalidation_id].update(facts_to_set)
+ if streaming_distribution:
+ facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
+ if streaming_distribution_config:
+ facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
+ if list_invalidations:
+ facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
+ if 'facts_to_set' in vars():
+ facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
+
+ # get list based on options
+ if all_lists or list_origin_access_identities:
+ facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
+ if all_lists or list_distributions:
+ facts['distributions'] = service_mgr.list_distributions()
+ if all_lists or list_streaming_distributions:
+ facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
+ if list_distributions_by_web_acl_id:
+ facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
+ if list_invalidations:
+ facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
+
+ # default summary option
+ if summary:
+ facts['summary'] = service_mgr.summary()
+
+ result['changed'] = False
+ result['cloudfront'].update(facts)
+ if is_old_facts:
+ module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result)
+ else:
+ module.exit_json(msg="Retrieved CloudFront info.", **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_info.py
new file mode 100644
index 00000000..293cd2f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_info.py
@@ -0,0 +1,715 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudfront_info
+version_added: 1.0.0
+short_description: Obtain facts about an AWS CloudFront distribution
+description:
+ - Gets information about an AWS CloudFront distribution.
+ - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.cloudfront_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+author: Willem van Ketwich (@wilvk)
+options:
+ distribution_id:
+ description:
+ - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
+ I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
+ required: false
+ type: str
+ invalidation_id:
+ description:
+ - The id of the invalidation to get information about.
+ - Used with I(invalidation).
+ required: false
+ type: str
+ origin_access_identity_id:
+ description:
+ - The id of the CloudFront origin access identity to get information about.
+ required: false
+ type: str
+ web_acl_id:
+ description:
+ - Used with I(list_distributions_by_web_acl_id).
+ required: false
+ type: str
+ domain_name_alias:
+ description:
+ - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
+ distribution to get the distribution id where required.
+ required: false
+ type: str
+ all_lists:
+ description:
+ - Get all CloudFront lists that do not require parameters.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity:
+ description:
+ - Get information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity_config:
+ description:
+ - Get the configuration information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution:
+ description:
+ - Get information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution_config:
+ description:
+ - Get the configuration information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ invalidation:
+ description:
+ - Get information about an invalidation.
+ - Requires I(invalidation_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution:
+ description:
+ - Get information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution_config:
+ description:
+ - Get the configuration information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_origin_access_identities:
+ description:
+ - Get a list of CloudFront origin access identities.
+ required: false
+ default: false
+ type: bool
+ list_distributions:
+ description:
+ - Get a list of CloudFront distributions.
+ required: false
+ default: false
+ type: bool
+ list_distributions_by_web_acl_id:
+ description:
+ - Get a list of distributions using web acl id as a filter.
+ - Requires I(web_acl_id) to be set.
+ required: false
+ default: false
+ type: bool
+ list_invalidations:
+ description:
+ - Get a list of invalidations.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_streaming_distributions:
+ description:
+ - Get a list of streaming distributions.
+ required: false
+ default: false
+ type: bool
+ summary:
+ description:
+ - Returns a summary of all distributions, streaming distributions and origin_access_identities.
+ - This is the default behaviour if no option is selected.
+ required: false
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get a summary of distributions
+ community.aws.cloudfront_info:
+ summary: true
+ register: result
+
+- name: Get information about a distribution
+ community.aws.cloudfront_info:
+ distribution: true
+ distribution_id: my-cloudfront-distribution-id
+ register: result_did
+- ansible.builtin.debug:
+ msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- name: Get information about a distribution using the CNAME of the cloudfront distribution.
+ community.aws.cloudfront_info:
+ distribution: true
+ domain_name_alias: www.my-website.com
+ register: result_website
+- ansible.builtin.debug:
+ msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
+
+# When the module is called as cloudfront_facts, return values are published
+# in ansible_facts['cloudfront'][<id>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+- name: Gather facts
+ community.aws.cloudfront_facts:
+ distribution: true
+ distribution_id: my-cloudfront-distribution-id
+- ansible.builtin.debug:
+ msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- community.aws.cloudfront_facts:
+ distribution: true
+ domain_name_alias: www.my-website.com
+- ansible.builtin.debug:
+ msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
+
+- name: Get all information about an invalidation for a distribution.
+ community.aws.cloudfront_info:
+ invalidation: true
+ distribution_id: my-cloudfront-distribution-id
+ invalidation_id: my-cloudfront-invalidation-id
+
+- name: Get all information about a CloudFront origin access identity.
+ community.aws.cloudfront_info:
+ origin_access_identity: true
+ origin_access_identity_id: my-cloudfront-origin-access-identity-id
+
+- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
+ community.aws.cloudfront_info:
+ all_lists: true
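+
+# A hypothetical follow-up (not part of the original docs): the module also
+# publishes a fixed 'result' key mirroring the details of the queried
+# distribution, so they can be read without knowing the distribution id first.
+- community.aws.cloudfront_info:
+ distribution: true
+ domain_name_alias: www.my-website.com
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result['cloudfront']['result']['DistributionId'] }}"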
+'''
+
+RETURN = '''
+origin_access_identity:
+ description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity) is true
+ type: dict
+origin_access_identity_configuration:
+ description: Describes the origin access identity's configuration information. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity_config) is true
+ type: dict
+distribution:
+ description: >
+ Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution) is true
+ type: dict
+distribution_config:
+ description: >
+ Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution_config) is true
+ type: dict
+invalidation:
+ description: >
+ Describes the invalidation information for the distribution. Requires
+ I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias).
+ returned: only if I(invalidation) is true
+ type: dict
+streaming_distribution:
+ description: >
+ Describes the streaming information for the distribution. Requires
+ I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution) is true
+ type: dict
+streaming_distribution_config:
+ description: >
+ Describes the streaming configuration information for the distribution.
+ Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution_config) is true
+ type: dict
+summary:
+ description: Gives a summary of distributions, streaming distributions and origin access identities.
+ returned: as default or if I(summary) is true
+ type: dict
+result:
+ description: >
+ Result dict not nested under the CloudFront ID, so the module's results can be accessed
+ without knowing that ID; figuring out the DistributionId is usually the reason one uses
+ this module in the first place.
+ returned: always
+ type: dict
+'''
+
+from functools import partial
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class CloudFrontServiceManager:
+ """Handles CloudFront Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.client = module.client('cloudfront')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def get_distribution(self, distribution_id):
+ try:
+ func = partial(self.client.get_distribution, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_distribution_config(self, distribution_id):
+ try:
+ func = partial(self.client.get_distribution_config, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_origin_access_identity(self, origin_access_identity_id):
+ try:
+ func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing origin access identity - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_origin_access_identity_config(self, origin_access_identity_id):
+ try:
+ func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_invalidation(self, distribution_id, invalidation_id):
+ try:
+ func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing invalidation - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_streaming_distribution(self, distribution_id):
+ try:
+ func = partial(self.client.get_streaming_distribution, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_streaming_distribution_config(self, distribution_id):
+ try:
+ func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
+ return self.paginated_response(func)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_origin_access_identities(self):
+ try:
+ func = partial(self.client.list_cloud_front_origin_access_identities)
+ origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList')
+ if origin_access_identity_list['Quantity'] > 0:
+ return origin_access_identity_list['Items']
+ return {}
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_distributions(self, keyed=True):
+ try:
+ func = partial(self.client.list_distributions)
+ distribution_list = self.paginated_response(func, 'DistributionList')
+ if distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ distribution_list = distribution_list['Items']
+ if not keyed:
+ return distribution_list
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_distributions_by_web_acl_id(self, web_acl_id):
+ try:
+ func = partial(self.client.list_distributions_by_web_acl_id, WebAclId=web_acl_id)
+ distribution_list = self.paginated_response(func, 'DistributionList')
+ if distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ distribution_list = distribution_list['Items']
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_invalidations(self, distribution_id):
+ try:
+ func = partial(self.client.list_invalidations, DistributionId=distribution_id)
+ invalidation_list = self.paginated_response(func, 'InvalidationList')
+ if invalidation_list['Quantity'] > 0:
+ return invalidation_list['Items']
+ return {}
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing invalidations - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def list_streaming_distributions(self, keyed=True):
+ try:
+ func = partial(self.client.list_streaming_distributions)
+ streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList')
+ if streaming_distribution_list['Quantity'] == 0:
+ return {}
+ else:
+ streaming_distribution_list = streaming_distribution_list['Items']
+ if not keyed:
+ return streaming_distribution_list
+ return self.keyed_list_helper(streaming_distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def summary(self):
+ summary_dict = {}
+ summary_dict.update(self.summary_get_distribution_list(False))
+ summary_dict.update(self.summary_get_distribution_list(True))
+ summary_dict.update(self.summary_get_origin_access_identity_list())
+ return summary_dict
+
+ def summary_get_origin_access_identity_list(self):
+ try:
+ origin_access_identity_list = {'origin_access_identities': []}
+ origin_access_identities = self.list_origin_access_identities()
+ for origin_access_identity in origin_access_identities:
+ oai_id = origin_access_identity['Id']
+ oai_full_response = self.get_origin_access_identity(oai_id)
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
+ return origin_access_identity_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def summary_get_distribution_list(self, streaming=False):
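+ # Collect a trimmed-down record per (streaming) distribution: the key
+ # attributes listed below plus aliases, ETag and tags and, for web
+ # distributions, the WebACLId and any invalidation ids.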
+ try:
+ list_name = 'streaming_distributions' if streaming else 'distributions'
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
+ distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ for dist in distributions:
+ temp_distribution = {}
+ for key_name in key_list:
+ temp_distribution[key_name] = dist[key_name]
+ temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
+ temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ if not streaming:
+ temp_distribution['WebACLId'] = dist['WebACLId']
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ if invalidation_ids:
+ temp_distribution['Invalidations'] = invalidation_ids
+ resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
+ temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ distribution_list[list_name].append(temp_distribution)
+ return distribution_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except Exception as e:
+ self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
+ exception=traceback.format_exc())
+
+ def get_etag_from_distribution_id(self, distribution_id, streaming):
+ distribution = {}
+ if not streaming:
+ distribution = self.get_distribution(distribution_id)
+ else:
+ distribution = self.get_streaming_distribution(distribution_id)
+ return distribution['ETag']
+
+ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
+ try:
+ invalidation_ids = []
+ invalidations = self.list_invalidations(distribution_id)
+ for invalidation in invalidations:
+ invalidation_ids.append(invalidation['Id'])
+ return invalidation_ids
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_distribution_id_from_domain_name(self, domain_name):
+ try:
+ distribution_id = ""
+ distributions = self.list_distributions(False)
+ distributions += self.list_streaming_distributions(False)
+ for dist in distributions:
+ if 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ if str(alias).lower() == domain_name.lower():
+ distribution_id = dist['Id']
+ break
+ return distribution_id
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def get_aliases_from_distribution_id(self, distribution_id):
+ aliases = []
+ try:
+ distributions = self.list_distributions(False)
+ for dist in distributions:
+ if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ aliases.append(alias)
+ break
+ return aliases
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ def paginated_response(self, func, result_key=""):
+ '''
+ Returns expanded response for paginated operations.
+ The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+ '''
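+ # Marker-based pagination: re-invoke the partial, passing the previous
+ # page's NextMarker, until a response carries no NextMarker. Each page's
+ # payload (or the sub-dict named by result_key) is merged into 'results'
+ # with dict.update(), so duplicate keys from later pages overwrite
+ # earlier ones rather than being concatenated.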
+ args = dict()
+ results = dict()
+ loop = True
+ while loop:
+ response = func(**args)
+ if result_key == "":
+ result = response
+ result.pop('ResponseMetadata', None)
+ else:
+ result = response.get(result_key)
+ results.update(result)
+ args['Marker'] = response.get('NextMarker')
+ for key in response.keys():
+ if key.endswith('List'):
+ args['Marker'] = response[key].get('NextMarker')
+ break
+ loop = args['Marker'] is not None
+ return results
+
+ def keyed_list_helper(self, list_to_key):
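+ # Index each item under both its distribution Id and every alias it
+ # carries, so callers can look a distribution up by either key.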
+ keyed_list = dict()
+ for item in list_to_key:
+ distribution_id = item['Id']
+ if 'Items' in item['Aliases']:
+ aliases = item['Aliases']['Items']
+ for alias in aliases:
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
+ return keyed_list
+
+
+def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
+ facts[distribution_id].update(details)
+ # also have a fixed key for accessing results/details returned
+ facts['result'] = details
+ facts['result']['DistributionId'] = distribution_id
+
+ for alias in aliases:
+ facts[alias].update(details)
+ return facts
+
+
+def main():
+ argument_spec = dict(
+ distribution_id=dict(required=False, type='str'),
+ invalidation_id=dict(required=False, type='str'),
+ origin_access_identity_id=dict(required=False, type='str'),
+ domain_name_alias=dict(required=False, type='str'),
+ web_acl_id=dict(required=False, type='str'),
+ all_lists=dict(required=False, default=False, type='bool'),
+ distribution=dict(required=False, default=False, type='bool'),
+ distribution_config=dict(required=False, default=False, type='bool'),
+ origin_access_identity=dict(required=False, default=False, type='bool'),
+ origin_access_identity_config=dict(required=False, default=False, type='bool'),
+ invalidation=dict(required=False, default=False, type='bool'),
+ streaming_distribution=dict(required=False, default=False, type='bool'),
+ streaming_distribution_config=dict(required=False, default=False, type='bool'),
+ list_origin_access_identities=dict(required=False, default=False, type='bool'),
+ list_distributions=dict(required=False, default=False, type='bool'),
+ list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
+ list_invalidations=dict(required=False, default=False, type='bool'),
+ list_streaming_distributions=dict(required=False, default=False, type='bool'),
+ summary=dict(required=False, default=False, type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+ is_old_facts = module._name == 'cloudfront_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ service_mgr = CloudFrontServiceManager(module)
+
+ distribution_id = module.params.get('distribution_id')
+ invalidation_id = module.params.get('invalidation_id')
+ origin_access_identity_id = module.params.get('origin_access_identity_id')
+ web_acl_id = module.params.get('web_acl_id')
+ domain_name_alias = module.params.get('domain_name_alias')
+ all_lists = module.params.get('all_lists')
+ distribution = module.params.get('distribution')
+ distribution_config = module.params.get('distribution_config')
+ origin_access_identity = module.params.get('origin_access_identity')
+ origin_access_identity_config = module.params.get('origin_access_identity_config')
+ invalidation = module.params.get('invalidation')
+ streaming_distribution = module.params.get('streaming_distribution')
+ streaming_distribution_config = module.params.get('streaming_distribution_config')
+ list_origin_access_identities = module.params.get('list_origin_access_identities')
+ list_distributions = module.params.get('list_distributions')
+ list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
+ list_invalidations = module.params.get('list_invalidations')
+ list_streaming_distributions = module.params.get('list_streaming_distributions')
+ summary = module.params.get('summary')
+
+ aliases = []
+ result = {'cloudfront': {}}
+ facts = {}
+
+ require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
+ streaming_distribution_config or list_invalidations)
+
+ # set default to summary if no option specified
+ summary = summary or not (distribution or distribution_config or origin_access_identity or
+ origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
+ list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
+ list_streaming_distributions or list_distributions)
+
+ # validations
+ if require_distribution_id and distribution_id is None and domain_name_alias is None:
+ module.fail_json(msg='Error distribution_id or domain_name_alias has not been specified.')
+ if (invalidation and invalidation_id is None):
+ module.fail_json(msg='Error invalidation_id has not been specified.')
+ if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
+ module.fail_json(msg='Error origin_access_identity_id has not been specified.')
+ if list_distributions_by_web_acl_id and web_acl_id is None:
+ module.fail_json(msg='Error web_acl_id has not been specified.')
+
+ # get distribution id from domain name alias
+ if require_distribution_id and distribution_id is None:
+ distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
+ if not distribution_id:
+ module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')
+
+ # set appropriate cloudfront id
+ if distribution_id and not list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ if invalidation_id:
+ facts.update({invalidation_id: {}})
+ elif distribution_id and list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ elif origin_access_identity_id:
+ facts = {origin_access_identity_id: {}}
+ elif web_acl_id:
+ facts = {web_acl_id: {}}
+
+ # get details based on options
+ if distribution:
+ facts_to_set = service_mgr.get_distribution(distribution_id)
+ if distribution_config:
+ facts_to_set = service_mgr.get_distribution_config(distribution_id)
+ if origin_access_identity:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
+ if origin_access_identity_config:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
+ if invalidation:
+ facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
+ facts[invalidation_id].update(facts_to_set)
+ if streaming_distribution:
+ facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
+ if streaming_distribution_config:
+ facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
+ if list_invalidations:
+ facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
+ if 'facts_to_set' in vars():
+ facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
+
+ # get list based on options
+ if all_lists or list_origin_access_identities:
+ facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
+ if all_lists or list_distributions:
+ facts['distributions'] = service_mgr.list_distributions()
+ if all_lists or list_streaming_distributions:
+ facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
+ if list_distributions_by_web_acl_id:
+ facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
+ if list_invalidations:
+ facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
+
+ # default summary option
+ if summary:
+ facts['summary'] = service_mgr.summary()
+
+ result['changed'] = False
+ result['cloudfront'].update(facts)
+ if is_old_facts:
+ module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result)
+ else:
+ module.exit_json(msg="Retrieved CloudFront info.", **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
new file mode 100644
index 00000000..52e3aea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+
+version_added: 1.0.0
+module: cloudfront_invalidation
+
+short_description: Create invalidations for AWS CloudFront distributions
+description:
+ - Allows for invalidation of a batch of paths for a CloudFront distribution.
+
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+
+
+author: Willem van Ketwich (@wilvk)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+options:
+ distribution_id:
+ description:
+ - The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
+ required: false
+ type: str
+ alias:
+ description:
+ - The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
+ required: false
+ type: str
+ caller_reference:
+ description:
+ - A unique reference identifier for the invalidation paths.
+ - Defaults to current datetime stamp.
+ required: false
+ default:
+ type: str
+ target_paths:
+ description:
+ - A list of paths on the distribution to invalidate. Each path should begin with C(/). Wildcards are allowed, e.g. C(/foo/bar/*).
+ required: true
+ type: list
+ elements: str
+
+notes:
+ - Does not support check mode.
+
+'''
+
+EXAMPLES = r'''
+
+- name: create a batch of invalidations using a distribution_id for a reference
+ community.aws.cloudfront_invalidation:
+ distribution_id: E15BU8SDCGSG57
+ caller_reference: testing 123
+ target_paths:
+ - /testpathone/test1.css
+ - /testpathtwo/test2.js
+ - /testpaththree/test3.ss
+
+- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
+ community.aws.cloudfront_invalidation:
+ alias: alias.test.com
+ caller_reference: testing 123
+ target_paths:
+ - /testpathone/test4.css
+ - /testpathtwo/test5.js
+ - /testpaththree/*
+
+'''
+
+RETURN = r'''
+invalidation:
+ description: The invalidation's information.
+ returned: always
+ type: complex
+ contains:
+ create_time:
+ description: The date and time the invalidation request was first made.
+ returned: always
+ type: str
+ sample: '2018-02-01T15:50:41.159000+00:00'
+ id:
+ description: The identifier for the invalidation request.
+ returned: always
+ type: str
+ sample: I2G9MOWJZFV612
+ invalidation_batch:
+ description: The current invalidation information for the batch request.
+ returned: always
+ type: complex
+ contains:
+ caller_reference:
+ description: The value used to uniquely identify an invalidation request.
+ returned: always
+ type: str
+ sample: testing 123
+ paths:
+ description: A dict that contains information about the objects that you want to invalidate.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: A list of the paths that you want to invalidate.
+ returned: always
+ type: list
+ sample:
+ - /testpathtwo/test2.js
+ - /testpathone/test1.css
+ - /testpaththree/test3.ss
+ quantity:
+ description: The number of objects that you want to invalidate.
+ returned: always
+ type: int
+ sample: 3
+ status:
+ description: The status of the invalidation request.
+ returned: always
+ type: str
+ sample: Completed
+location:
+ description: The fully qualified URI of the distribution and invalidation batch request.
+ returned: always
+ type: str
+ sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+import datetime
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+
+class CloudFrontInvalidationServiceManager(object):
+ """
+ Handles CloudFront service calls to AWS for invalidations
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront')
+
+ def create_invalidation(self, distribution_id, invalidation_batch):
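+ # Idempotency check: CloudFront refuses to create a new invalidation
+ # batch that reuses a CallerReference for the same distribution, so look
+ # up any existing invalidation with this caller reference first and
+ # report changed=False if the create call is rejected for that reason.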
+ current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
+ try:
+ response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
+ response.pop('ResponseMetadata', None)
+ if current_invalidation_response:
+ return response, False
+ else:
+ return response, True
+ except BotoCoreError as e:
+ self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
+ except ClientError as e:
+ if ('Your request contains a caller reference that was used for a previous invalidation batch '
+ 'for the same distribution.' in e.response['Error']['Message']):
+ self.module.warn("InvalidationBatch target paths are not modifiable. "
+ "To make a new invalidation please update caller_reference.")
+ return current_invalidation_response, False
+ else:
+ self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
+
+ def get_invalidation(self, distribution_id, caller_reference):
+ current_invalidation = {}
+ # find all invalidations for the distribution
+ try:
+ paginator = self.client.get_paginator('list_invalidations')
+ invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
+ invalidation_ids = [inv['Id'] for inv in invalidations]
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
+
+ # check if there is an invalidation with the same caller reference
+ for inv_id in invalidation_ids:
+ try:
+ invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
+ caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
+ if caller_ref == caller_reference:
+ current_invalidation = invalidation
+ break
+
+ current_invalidation.pop('ResponseMetadata', None)
+ return current_invalidation
+
+
+class CloudFrontInvalidationValidationManager(object):
+ """
+ Manages CloudFront validations for invalidation batches
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+
+ def validate_distribution_id(self, distribution_id, alias):
+ try:
+ if distribution_id is None and alias is None:
+ self.module.fail_json(msg="distribution_id or alias must be specified")
+ if distribution_id is None:
+ distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
+ return distribution_id
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating parameters.")
+
+ def create_aws_list(self, invalidation_batch):
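+ # CloudFront list parameters are shaped as {'Quantity': <count>, 'Items': [...]}.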
+ aws_list = {}
+ aws_list["Quantity"] = len(invalidation_batch)
+ aws_list["Items"] = invalidation_batch
+ return aws_list
+
+ def validate_invalidation_batch(self, invalidation_batch, caller_reference):
+ try:
+ if caller_reference is not None:
+ valid_caller_reference = caller_reference
+ else:
+ valid_caller_reference = datetime.datetime.now().isoformat()
+ valid_invalidation_batch = {
+ 'paths': self.create_aws_list(invalidation_batch),
+ 'caller_reference': valid_caller_reference
+ }
+ return valid_invalidation_batch
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
+
+
+def main():
+ argument_spec = dict(
+ caller_reference=dict(),
+ distribution_id=dict(),
+ alias=dict(),
+ target_paths=dict(required=True, type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
+
+ validation_mgr = CloudFrontInvalidationValidationManager(module)
+ service_mgr = CloudFrontInvalidationServiceManager(module)
+
+ caller_reference = module.params.get('caller_reference')
+ distribution_id = module.params.get('distribution_id')
+ alias = module.params.get('alias')
+ target_paths = module.params.get('target_paths')
+
+ result = {}
+
+ distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
+ valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
+ valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
+ result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
new file mode 100644
index 00000000..00f18822
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+
+version_added: 1.0.0
+module: cloudfront_origin_access_identity
+
+short_description: Create, update and delete origin access identities for a
+ CloudFront distribution
+
+description:
+ - Allows for easy creation, updating and deletion of origin access
+ identities.
+
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+
+
+author: Willem van Ketwich (@wilvk)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+options:
+ state:
+    description: Whether the named resource should exist.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ origin_access_identity_id:
+ description:
+      - The ID of the CloudFront origin access identity.
+ required: false
+ type: str
+ comment:
+ description:
+ - A comment to describe the CloudFront origin access identity.
+ required: false
+ type: str
+ caller_reference:
+ description:
+ - A unique identifier to reference the origin access identity by.
+ required: false
+ type: str
+
+notes:
+ - Does not support check mode.
+
+'''
+
+EXAMPLES = '''
+
+- name: create an origin access identity
+ community.aws.cloudfront_origin_access_identity:
+ state: present
+ caller_reference: this is an example reference
+ comment: this is an example comment
+
+- name: update an existing origin access identity using caller_reference as an identifier
+ community.aws.cloudfront_origin_access_identity:
+ origin_access_identity_id: E17DRN9XUOAHZX
+ caller_reference: this is an example reference
+ comment: this is a new comment
+
+- name: delete an existing origin access identity using caller_reference as an identifier
+ community.aws.cloudfront_origin_access_identity:
+ state: absent
+ caller_reference: this is an example reference
+ comment: this is a new comment
+
+'''
+
+RETURN = '''
+cloud_front_origin_access_identity:
+ description: The origin access identity's information.
+ returned: always
+ type: complex
+ contains:
+ cloud_front_origin_access_identity_config:
+      description: The configuration of the origin access identity.
+      returned: always
+      type: complex
+      contains:
+        caller_reference:
+          description: A unique reference identifying the origin access identity.
+          returned: always
+          type: str
+        comment:
+          description: A comment describing the origin access identity.
+          returned: always
+          type: str
+        id:
+          description: The unique identifier of the origin access identity.
+          returned: always
+          type: str
+        s3_canonical_user_id:
+          description: The Amazon S3 canonical user ID of the user who created the origin access identity.
+          returned: always
+          type: str
+e_tag:
+ description: The current version of the origin access identity created.
+ returned: always
+ type: str
+location:
+ description: The fully qualified URI of the new origin access identity just created.
+ returned: when initially created
+ type: str
+
+'''
+
+import datetime
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by imported AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+
+
+class CloudFrontOriginAccessIdentityServiceManager(object):
+ """
+    Handles CloudFront origin access identity service calls to AWS
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront')
+
+ def create_origin_access_identity(self, caller_reference, comment):
+ try:
+ return self.client.create_cloud_front_origin_access_identity(
+ CloudFrontOriginAccessIdentityConfig={
+ 'CallerReference': caller_reference,
+ 'Comment': comment
+ }
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.")
+
+ def delete_origin_access_identity(self, origin_access_identity_id, e_tag):
+ try:
+ return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
+
+ def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag):
+ changed = False
+ new_config = {
+ 'CallerReference': caller_reference,
+ 'Comment': comment
+ }
+
+ try:
+ current_config = self.client.get_cloud_front_origin_access_identity_config(
+ Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig']
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.")
+
+ if new_config != current_config:
+ changed = True
+
+ try:
+ # If the CallerReference is a value already sent in a previous identity request
+ # the returned value is that of the original request
+ result = self.client.update_cloud_front_origin_access_identity(
+ CloudFrontOriginAccessIdentityConfig=new_config,
+ Id=origin_access_identity_id,
+ IfMatch=e_tag,
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
+
+ return result, changed
+
+
+class CloudFrontOriginAccessIdentityValidationManager(object):
+ """
+    Validates parameters for CloudFront origin access identities
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+
+ def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id):
+ try:
+ if origin_access_identity_id is None:
+ return
+ oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id)
+ if oai is not None:
+ return oai.get('ETag')
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")
+
+    def validate_origin_access_identity_id_from_caller_reference(self, caller_reference):
+ try:
+ origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
+            origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities]
+            for origin_access_identity_id in origin_access_identity_ids:
+ oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id))
+ temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
+ if temp_caller_reference == caller_reference:
+ return origin_access_identity_id
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")
+
+ def validate_comment(self, comment):
+ if comment is None:
+ return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ return comment
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ origin_access_identity_id=dict(),
+ caller_reference=dict(),
+ comment=dict(),
+ )
+
+ result = {}
+ e_tag = None
+ changed = False
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+ service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
+ validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)
+
+ state = module.params.get('state')
+ caller_reference = module.params.get('caller_reference')
+
+ comment = module.params.get('comment')
+ origin_access_identity_id = module.params.get('origin_access_identity_id')
+
+ if origin_access_identity_id is None and caller_reference is not None:
+ origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)
+
+ e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
+ comment = validation_mgr.validate_comment(comment)
+
+ if state == 'present':
+ if origin_access_identity_id is not None and e_tag is not None:
+ result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
+ else:
+ result = service_mgr.create_origin_access_identity(caller_reference, comment)
+ changed = True
+    elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
+ result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
+ changed = True
+
+ result.pop('ResponseMetadata', None)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudtrail.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudtrail.py
new file mode 100644
index 00000000..c0bf3f4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudtrail.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail
+version_added: 1.0.0
+short_description: manage CloudTrail create, delete, update
+description:
+ - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
+author:
+ - Ansible Core Team
+ - Ted Timmons (@tedder)
+ - Daniel Shepherd (@shepdelacreme)
+requirements:
+ - boto3
+ - botocore
+options:
+ state:
+ description:
+ - Add or remove CloudTrail configuration.
+ - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
+      - I(state=enabled) is equivalent to I(state=present).
+      - I(state=disabled) is equivalent to I(state=absent).
+ type: str
+ choices: ['present', 'absent', 'enabled', 'disabled']
+ default: present
+ name:
+ description:
+ - Name for the CloudTrail.
+      - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case the name is unique per-account.
+ type: str
+ default: default
+ enable_logging:
+ description:
+ - Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
+ default: true
+ type: bool
+ s3_bucket_name:
+ description:
+ - An existing S3 bucket where CloudTrail will deliver log files.
+ - This bucket should exist and have the proper policy.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
+ - Required when I(state=present).
+ type: str
+ s3_key_prefix:
+ description:
+ - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
+ type: str
+ is_multi_region_trail:
+ description:
+ - Specify whether the trail belongs only to one region or exists in all regions.
+ default: false
+ type: bool
+ enable_log_file_validation:
+ description:
+ - Specifies whether log file integrity validation is enabled.
+      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
+ type: bool
+ aliases: [ "log_file_validation_enabled" ]
+ include_global_events:
+ description:
+ - Record API calls from global services such as IAM and STS.
+ default: true
+ type: bool
+ aliases: [ "include_global_service_events" ]
+ sns_topic_name:
+ description:
+ - SNS Topic name to send notifications to when a log file is delivered.
+ type: str
+ cloudwatch_logs_role_arn:
+ description:
+ - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_log_group_arn) is specified.
+ type: str
+ cloudwatch_logs_log_group_arn:
+ description:
+ - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_role_arn) is specified.
+ type: str
+ kms_key_id:
+ description:
+ - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
+ - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to be applied to the CloudTrail resource.
+      - Omit this parameter or specify an empty dictionary to remove all tags.
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: create single region cloudtrail
+ community.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ s3_key_prefix: cloudtrail
+ region: us-east-1
+
+- name: create multi-region trail with validation and tags
+ community.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ region: us-east-1
+ is_multi_region_trail: true
+ enable_log_file_validation: true
+ cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
+ cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
+ kms_key_id: "alias/MyAliasName"
+ tags:
+ environment: dev
+ Name: default
+
+- name: show another valid kms_key_id
+ community.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
+ # simply "12345678-1234-1234-1234-123456789012" would be valid too.
+
+- name: pause logging the trail we just created
+ community.aws.cloudtrail:
+ state: present
+ name: default
+ enable_logging: false
+ s3_bucket_name: mylogbucket
+ region: us-east-1
+ is_multi_region_trail: true
+ enable_log_file_validation: true
+ tags:
+ environment: dev
+ Name: default
+
+- name: delete a trail
+ community.aws.cloudtrail:
+ state: absent
+ name: default
+'''
+
+RETURN = '''
+exists:
+ description: whether the resource exists
+ returned: always
+ type: bool
+ sample: true
+trail:
+ description: CloudTrail resource details
+ returned: always
+ type: complex
+ sample: hash/dictionary of values
+ contains:
+ trail_arn:
+ description: Full ARN of the CloudTrail resource
+ returned: success
+ type: str
+ sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
+ name:
+ description: Name of the CloudTrail resource
+ returned: success
+ type: str
+ sample: default
+ is_logging:
+ description: Whether logging is turned on or paused for the Trail
+ returned: success
+ type: bool
+ sample: True
+ s3_bucket_name:
+ description: S3 bucket name where log files are delivered
+ returned: success
+ type: str
+ sample: myBucket
+ s3_key_prefix:
+ description: Key prefix in bucket where log files are delivered (if any)
+ returned: success when present
+ type: str
+ sample: myKeyPrefix
+ log_file_validation_enabled:
+ description: Whether log file validation is enabled on the trail
+ returned: success
+ type: bool
+ sample: true
+ include_global_service_events:
+ description: Whether global services (IAM, STS) are logged with this trail
+ returned: success
+ type: bool
+ sample: true
+ is_multi_region_trail:
+ description: Whether the trail applies to all regions or just one
+ returned: success
+ type: bool
+ sample: true
+ has_custom_event_selectors:
+ description: Whether any custom event selectors are used for this trail.
+ returned: success
+ type: bool
+ sample: False
+ home_region:
+ description: The home region where the trail was originally created and must be edited.
+ returned: success
+ type: str
+ sample: us-east-1
+ sns_topic_name:
+ description: The SNS topic name where log delivery notifications are sent.
+ returned: success when present
+ type: str
+ sample: myTopic
+ sns_topic_arn:
+ description: Full ARN of the SNS topic where log delivery notifications are sent.
+ returned: success when present
+ type: str
+ sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
+ cloud_watch_logs_log_group_arn:
+ description: Full ARN of the CloudWatch Logs log group where events are delivered.
+ returned: success when present
+ type: str
+ sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
+ cloud_watch_logs_role_arn:
+ description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
+ returned: success when present
+ type: str
+ sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
+ kms_key_id:
+ description: Full ARN of the KMS Key used to encrypt log files.
+ returned: success when present
+ type: str
+ sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
+ tags:
+ description: hash/dictionary of tags applied to this resource
+ returned: success
+ type: dict
+ sample: {'environment': 'dev', 'Name': 'default'}
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict,
+ ansible_dict_to_boto3_tag_list,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+
+def create_trail(module, client, ct_params):
+ """
+ Creates a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ ct_params : The parameters for the Trail to create
+ """
+ resp = {}
+ try:
+ resp = client.create_trail(**ct_params)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to create Trail")
+
+ return resp
+
+
+def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
+ """
+ Creates, updates, removes tags on a CloudTrail resource
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ tags : Dict of tags converted from ansible_dict to boto3 list of dicts
+ trail_arn : The ARN of the CloudTrail to operate on
+ curr_tags : Dict of the current tags on resource, if any
+    dry_run : true/false; when true, compute whether changes are needed but do not apply them
+ """
+ adds = []
+ removes = []
+ updates = []
+ changed = False
+
+ if curr_tags is None:
+ # No current tags so just convert all to a tag list
+ adds = ansible_dict_to_boto3_tag_list(tags)
+ else:
+ curr_keys = set(curr_tags.keys())
+ new_keys = set(tags.keys())
+ add_keys = new_keys - curr_keys
+ remove_keys = curr_keys - new_keys
+ update_keys = dict()
+ for k in curr_keys.intersection(new_keys):
+ if curr_tags[k] != tags[k]:
+ update_keys.update({k: tags[k]})
+
+ adds = get_tag_list(add_keys, tags)
+ removes = get_tag_list(remove_keys, curr_tags)
+ updates = get_tag_list(update_keys, tags)
+
+ if removes or updates:
+ changed = True
+ if not dry_run:
+ try:
+ client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to remove tags from Trail")
+
+ if updates or adds:
+ changed = True
+ if not dry_run:
+ try:
+ client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to add tags to Trail")
+
+ return changed
+
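+# Worked example of the diff above (illustrative values): with
+#   curr_tags={'Name': 'default', 'env': 'dev'} and tags={'env': 'prod', 'team': 'ops'},
+# the computed sets are add_keys={'team'}, remove_keys={'Name'} and update_keys={'env': 'prod'};
+# remove_tags() is passed the removed 'Name' tag plus the updated 'env' tag, and add_tags()
+# then re-adds 'env'='prod' together with 'team'='ops'.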
+
+def get_tag_list(keys, tags):
+ """
+ Returns a list of dicts with tags to act on
+ keys : set of keys to get the values for
+ tags : the dict of tags to turn into a list
+ """
+ tag_list = []
+ for k in keys:
+ tag_list.append({'Key': k, 'Value': tags[k]})
+
+ return tag_list
+
+
+def set_logging(module, client, name, action):
+ """
+ Starts or stops logging based on given state
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ name : The name or ARN of the CloudTrail to operate on
+ action : start or stop
+ """
+ if action == 'start':
+ try:
+ client.start_logging(Name=name)
+ return client.get_trail_status(Name=name)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to start logging")
+ elif action == 'stop':
+ try:
+ client.stop_logging(Name=name)
+ return client.get_trail_status(Name=name)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to stop logging")
+ else:
+ module.fail_json(msg="Unsupported logging action")
+
+
+def get_trail_facts(module, client, name):
+ """
+ Describes existing trail in an account
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ name : Name of the trail
+ """
+ # get Trail info
+ try:
+ trail_resp = client.describe_trails(trailNameList=[name])
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to describe Trail")
+
+ # Now check to see if our trail exists and get status and tags
+    if trail_resp['trailList']:
+ trail = trail_resp['trailList'][0]
+ try:
+ status_resp = client.get_trail_status(Name=trail['Name'])
+ tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to describe Trail")
+
+ trail['IsLogging'] = status_resp['IsLogging']
+ trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
+ # Check for non-existent values and populate with None
+ optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
+ for v in optional_vals - set(trail.keys()):
+ trail[v] = None
+ return trail
+
+ else:
+ # trail doesn't exist return None
+ return None
+
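+# Shape sketch (illustrative): get_trail_facts() returns a dict such as
+#   {'Name': 'default', 'TrailARN': 'arn:aws:cloudtrail:us-east-1:123456789012:trail/default',
+#    'IsLogging': True, 'tags': {'env': 'dev'}, 'S3KeyPrefix': None, 'KmsKeyId': None, ...}
+# with any optional key absent from describe_trails filled in as None, so main() can diff it
+# against ct_params uniformly.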
+
+def delete_trail(module, client, trail_arn):
+ """
+ Delete a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ trail_arn : Full CloudTrail ARN
+ """
+ try:
+ client.delete_trail(Name=trail_arn)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to delete Trail")
+
+
+def update_trail(module, client, ct_params):
+ """
+    Updates a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ ct_params : The parameters for the Trail to update
+ """
+ try:
+ client.update_trail(**ct_params)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to update Trail")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ name=dict(default='default'),
+ enable_logging=dict(default=True, type='bool'),
+ s3_bucket_name=dict(),
+ s3_key_prefix=dict(),
+ sns_topic_name=dict(),
+ is_multi_region_trail=dict(default=False, type='bool'),
+ enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
+ include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
+ cloudwatch_logs_role_arn=dict(),
+ cloudwatch_logs_log_group_arn=dict(),
+ kms_key_id=dict(),
+ tags=dict(default={}, type='dict'),
+ )
+
+ required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
+ required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
+
+ # collect parameters
+ if module.params['state'] in ('present', 'enabled'):
+ state = 'present'
+ elif module.params['state'] in ('absent', 'disabled'):
+ state = 'absent'
+ tags = module.params['tags']
+ enable_logging = module.params['enable_logging']
+ ct_params = dict(
+ Name=module.params['name'],
+ S3BucketName=module.params['s3_bucket_name'],
+ IncludeGlobalServiceEvents=module.params['include_global_events'],
+ IsMultiRegionTrail=module.params['is_multi_region_trail'],
+ )
+
+ if module.params['s3_key_prefix']:
+ ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
+
+ if module.params['sns_topic_name']:
+ ct_params['SnsTopicName'] = module.params['sns_topic_name']
+
+ if module.params['cloudwatch_logs_role_arn']:
+ ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
+
+ if module.params['cloudwatch_logs_log_group_arn']:
+ ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
+
+ if module.params['enable_log_file_validation'] is not None:
+ ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
+
+ if module.params['kms_key_id']:
+ ct_params['KmsKeyId'] = module.params['kms_key_id']
+
+ client = module.client('cloudtrail')
+ region = module.region
+
+ results = dict(
+ changed=False,
+ exists=False
+ )
+
+ # Get existing trail facts
+ trail = get_trail_facts(module, client, ct_params['Name'])
+
+ # If the trail exists set the result exists variable
+ if trail is not None:
+ results['exists'] = True
+
+ if state == 'absent' and results['exists']:
+ # If Trail exists go ahead and delete
+ results['changed'] = True
+ results['exists'] = False
+ results['trail'] = dict()
+ if not module.check_mode:
+ delete_trail(module, client, trail['TrailARN'])
+
+ elif state == 'present' and results['exists']:
+ # If Trail exists see if we need to update it
+ do_update = False
+ for key in ct_params:
+ tkey = str(key)
+ # boto3 has inconsistent parameter naming so we handle it here
+ if key == 'EnableLogFileValidation':
+ tkey = 'LogFileValidationEnabled'
+ # We need to make an empty string equal None
+ if ct_params.get(key) == '':
+ val = None
+ else:
+ val = ct_params.get(key)
+ if val != trail.get(tkey):
+ do_update = True
+ results['changed'] = True
+ # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
+ if module.check_mode:
+ trail.update({tkey: ct_params.get(key)})
+
+ if not module.check_mode and do_update:
+ update_trail(module, client, ct_params)
+ trail = get_trail_facts(module, client, ct_params['Name'])
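+
+        # Naming quirk (values illustrative): the create/update parameter is named
+        # EnableLogFileValidation, while describe_trails reports LogFileValidationEnabled;
+        # the tkey remapping above bridges the two, e.g. ct_params={'EnableLogFileValidation': True}
+        # is compared against trail={'LogFileValidationEnabled': False} and marks the trail changed.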
+
+ # Check if we need to start/stop logging
+ if enable_logging and not trail['IsLogging']:
+ results['changed'] = True
+ trail['IsLogging'] = True
+ if not module.check_mode:
+ set_logging(module, client, name=ct_params['Name'], action='start')
+ if not enable_logging and trail['IsLogging']:
+ results['changed'] = True
+ trail['IsLogging'] = False
+ if not module.check_mode:
+ set_logging(module, client, name=ct_params['Name'], action='stop')
+
+ # Check if we need to update tags on resource
+ tag_dry_run = False
+ if module.check_mode:
+ tag_dry_run = True
+ tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
+ if tags_changed:
+ results['changed'] = True
+ trail['tags'] = tags
+ # Populate trail facts in output
+ results['trail'] = camel_dict_to_snake_dict(trail)
+
+ elif state == 'present' and not results['exists']:
+ # Trail doesn't exist just go create it
+ results['changed'] = True
+ if not module.check_mode:
+ # If we aren't in check_mode then actually create it
+ created_trail = create_trail(module, client, ct_params)
+ # Apply tags
+ tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
+ # Get the trail status
+ try:
+ status_resp = client.get_trail_status(Name=created_trail['Name'])
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to fetch Trail statuc")
+ # Set the logging state for the trail to desired value
+ if enable_logging and not status_resp['IsLogging']:
+ set_logging(module, client, name=ct_params['Name'], action='start')
+ if not enable_logging and status_resp['IsLogging']:
+ set_logging(module, client, name=ct_params['Name'], action='stop')
+ # Get facts for newly created Trail
+ trail = get_trail_facts(module, client, ct_params['Name'])
+
+ # If we are in check mode create a fake return structure for the newly minted trail
+ if module.check_mode:
+ acct_id = '123456789012'
+ try:
+ sts_client = module.client('sts')
+ acct_id = sts_client.get_caller_identity()['Account']
+ except (BotoCoreError, ClientError):
+ pass
+ trail = dict()
+ trail.update(ct_params)
+ if 'EnableLogFileValidation' not in ct_params:
+ ct_params['EnableLogFileValidation'] = False
+            # describe_trails reports this flag as LogFileValidationEnabled
+            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
+            trail.pop('EnableLogFileValidation', None)
+ fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
+ trail['HasCustomEventSelectors'] = False
+ trail['HomeRegion'] = region
+ trail['TrailARN'] = fake_arn
+ trail['IsLogging'] = enable_logging
+ trail['tags'] = tags
+ # Populate trail facts in output
+ results['trail'] = camel_dict_to_snake_dict(trail)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchevent_rule.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchevent_rule.py
new file mode 100644
index 00000000..0ba66909
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchevent_rule.py
@@ -0,0 +1,460 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cloudwatchevent_rule
+version_added: 1.0.0
+short_description: Manage CloudWatch Event rules and targets
+description:
+ - This module creates and manages CloudWatch event rules and targets.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
+ rule can have both an I(event_pattern) and a I(schedule_expression), in which
+ case the rule will trigger on matching events as well as on a schedule.
+ - When specifying targets, I(input) and I(input_path) are mutually-exclusive
+ and optional parameters.
+options:
+ name:
+ description:
+ - The name of the rule you are creating, updating or deleting. No spaces
+ or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
+ required: true
+ type: str
+ schedule_expression:
+ description:
+ - A cron or rate expression that defines the schedule the rule will
+ trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
+ required: false
+ type: str
+ event_pattern:
+ description:
+ - A string pattern (in valid JSON format) that is used to match against
+ incoming events to determine if the rule should be triggered.
+ required: false
+ type: str
+ state:
+ description:
+ - Whether the rule is present (and enabled), disabled, or absent.
+ choices: ["present", "disabled", "absent"]
+ default: present
+ required: false
+ type: str
+ description:
+ description:
+ - A description of the rule.
+ required: false
+ type: str
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role associated with the rule.
+ required: false
+ type: str
+ targets:
+ type: list
+ elements: dict
+ description:
+ - A list of targets to add to or update for the rule.
+ suboptions:
+ id:
+ type: str
+ required: true
+ description: The unique target assignment ID.
+ arn:
+ type: str
+ required: true
+ description: The ARN associated with the target.
+ role_arn:
+ type: str
+ description: The ARN of the IAM role to be used for this target when the rule is triggered.
+ input:
+ type: str
+ description:
+ - A JSON object that will override the event data when passed to the target.
+ - If neither I(input) nor I(input_path) is specified, then the entire
+ event is passed to the target in JSON form.
+ input_path:
+ type: str
+ description:
+ - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
+ passed to the target.
+ - If neither I(input) nor I(input_path) is specified, then the entire
+ event is passed to the target in JSON form.
+ ecs_parameters:
+ type: dict
+ description:
+ - Contains the ECS task definition and task count to be used, if the event target is an ECS task.
+ suboptions:
+ task_definition_arn:
+ type: str
+ description: The full ARN of the task definition.
+ task_count:
+ type: int
+          description: The number of tasks to create based on I(task_definition_arn).
+ required: false
+'''
+
+EXAMPLES = r'''
+- community.aws.cloudwatchevent_rule:
+ name: MyCronTask
+ schedule_expression: "cron(0 20 * * ? *)"
+ description: Run my scheduled task
+ targets:
+ - id: MyTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+
+- community.aws.cloudwatchevent_rule:
+ name: MyDisabledCronTask
+ schedule_expression: "rate(5 minutes)"
+ description: Run my disabled scheduled task
+ state: disabled
+ targets:
+ - id: MyOtherTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+ input: '{"foo": "bar"}'
+
+- community.aws.cloudwatchevent_rule:
+ name: MyCronTask
+ state: absent
+'''
+
+RETURN = r'''
+rule:
+ description: CloudWatch Event rule data.
+ returned: success
+ type: dict
+ sample:
+ arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
+ description: 'Run my scheduled task'
+ name: 'MyCronTask'
+ schedule_expression: 'cron(0 20 * * ? *)'
+ state: 'ENABLED'
+targets:
+ description: CloudWatch Event target(s) assigned to the rule.
+ returned: success
+ type: list
+ sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class CloudWatchEventRule(object):
+ def __init__(self, module, name, client, schedule_expression=None,
+ event_pattern=None, description=None, role_arn=None):
+ self.name = name
+ self.client = client
+ self.changed = False
+ self.schedule_expression = schedule_expression
+ self.event_pattern = event_pattern
+ self.description = description
+ self.role_arn = role_arn
+ self.module = module
+
+ def describe(self):
+ """Returns the existing details of the rule in AWS"""
+ try:
+ rule_info = self.client.describe_rule(Name=self.name)
+ except botocore.exceptions.ClientError as e:
+ error_code = e.response.get('Error', {}).get('Code')
+ if error_code == 'ResourceNotFoundException':
+ return {}
+ self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
+ except botocore.exceptions.BotoCoreError as e:
+ self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
+ return self._snakify(rule_info)
+
+ def put(self, enabled=True):
+ """Creates or updates the rule in AWS"""
+ request = {
+ 'Name': self.name,
+ 'State': "ENABLED" if enabled else "DISABLED",
+ }
+ if self.schedule_expression:
+ request['ScheduleExpression'] = self.schedule_expression
+ if self.event_pattern:
+ request['EventPattern'] = self.event_pattern
+ if self.description:
+ request['Description'] = self.description
+ if self.role_arn:
+ request['RoleArn'] = self.role_arn
+ try:
+ response = self.client.put_rule(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def delete(self):
+ """Deletes the rule in AWS"""
+ self.remove_all_targets()
+
+ try:
+ response = self.client.delete_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def enable(self):
+ """Enables the rule in AWS"""
+ try:
+ response = self.client.enable_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def disable(self):
+ """Disables the rule in AWS"""
+ try:
+ response = self.client.disable_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def list_targets(self):
+ """Lists the existing targets for the rule in AWS"""
+ try:
+ targets = self.client.list_targets_by_rule(Rule=self.name)
+ except botocore.exceptions.ClientError as e:
+ error_code = e.response.get('Error', {}).get('Code')
+ if error_code == 'ResourceNotFoundException':
+ return []
+ self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
+ except botocore.exceptions.BotoCoreError as e:
+ self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
+ return self._snakify(targets)['targets']
+
+ def put_targets(self, targets):
+ """Creates or updates the provided targets on the rule in AWS"""
+ if not targets:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Targets': self._targets_request(targets),
+ }
+ try:
+ response = self.client.put_targets(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def remove_targets(self, target_ids):
+ """Removes the provided targets from the rule in AWS"""
+ if not target_ids:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Ids': target_ids
+ }
+ try:
+ response = self.client.remove_targets(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def remove_all_targets(self):
+ """Removes all targets on rule"""
+ targets = self.list_targets()
+ return self.remove_targets([t['id'] for t in targets])
+
+ def _targets_request(self, targets):
+ """Formats each target for the request"""
+ targets_request = []
+ for target in targets:
+ target_request = {
+ 'Id': target['id'],
+ 'Arn': target['arn']
+ }
+ if 'input' in target:
+ target_request['Input'] = target['input']
+ if 'input_path' in target:
+ target_request['InputPath'] = target['input_path']
+ if 'role_arn' in target:
+ target_request['RoleArn'] = target['role_arn']
+ if 'ecs_parameters' in target:
+ target_request['EcsParameters'] = {}
+ ecs_parameters = target['ecs_parameters']
+ if 'task_definition_arn' in target['ecs_parameters']:
+ target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn']
+ if 'task_count' in target['ecs_parameters']:
+ target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count']
+ targets_request.append(target_request)
+ return targets_request
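+
+    # Transformation sketch (illustrative values): a target such as
+    #   {'id': 'MyTargetId', 'arn': 'arn:aws:ecs:...', 'ecs_parameters': {'task_count': 1}}
+    # becomes the boto3 request entry
+    #   {'Id': 'MyTargetId', 'Arn': 'arn:aws:ecs:...', 'EcsParameters': {'TaskCount': 1}}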
+
+    def _snakify(self, data):
+        """Converts camel case dict keys to snake case"""
+        return camel_dict_to_snake_dict(data)
+
+
+class CloudWatchEventRuleManager(object):
+ RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
+
+ def __init__(self, rule, targets):
+ self.rule = rule
+ self.targets = targets
+
+ def ensure_present(self, enabled=True):
+ """Ensures the rule and targets are present and synced"""
+ rule_description = self.rule.describe()
+ if rule_description:
+ # Rule exists so update rule, targets and state
+ self._sync_rule(enabled)
+ self._sync_targets()
+ self._sync_state(enabled)
+ else:
+ # Rule does not exist, so create new rule and targets
+ self._create(enabled)
+
+ def ensure_disabled(self):
+ """Ensures the rule and targets are present, but disabled, and synced"""
+ self.ensure_present(enabled=False)
+
+ def ensure_absent(self):
+ """Ensures the rule and targets are absent"""
+ rule_description = self.rule.describe()
+ if not rule_description:
+ # Rule doesn't exist so don't need to delete
+ return
+ self.rule.delete()
+
+ def fetch_aws_state(self):
+ """Retrieves rule and target state from AWS"""
+ aws_state = {
+ 'rule': {},
+ 'targets': [],
+ 'changed': self.rule.changed
+ }
+ rule_description = self.rule.describe()
+ if not rule_description:
+ return aws_state
+
+ # Don't need to include response metadata noise in response
+ del rule_description['response_metadata']
+
+ aws_state['rule'] = rule_description
+ aws_state['targets'].extend(self.rule.list_targets())
+ return aws_state
+
+ def _sync_rule(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ if not self._rule_matches_aws():
+ self.rule.put(enabled)
+
+ def _sync_targets(self):
+ """Syncs local targets with AWS"""
+ # Identify and remove extraneous targets on AWS
+ target_ids_to_remove = self._remote_target_ids_to_remove()
+ if target_ids_to_remove:
+ self.rule.remove_targets(target_ids_to_remove)
+
+ # Identify targets that need to be added or updated on AWS
+ targets_to_put = self._targets_to_put()
+ if targets_to_put:
+ self.rule.put_targets(targets_to_put)
+
+ def _sync_state(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ remote_state = self._remote_state()
+ if enabled and remote_state != 'ENABLED':
+ self.rule.enable()
+ elif not enabled and remote_state != 'DISABLED':
+ self.rule.disable()
+
+ def _create(self, enabled=True):
+ """Creates rule and targets on AWS"""
+ self.rule.put(enabled)
+ self.rule.put_targets(self.targets)
+
+ def _rule_matches_aws(self):
+ """Checks if the local rule data matches AWS"""
+ aws_rule_data = self.rule.describe()
+
+ # The rule matches AWS only if all rule data fields are equal
+ # to their corresponding local value defined in the task
+ return all([
+ getattr(self.rule, field) == aws_rule_data.get(field, None)
+ for field in self.RULE_FIELDS
+ ])
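+
+    # Matching sketch (illustrative): with self.schedule_expression='rate(5 minutes)' and
+    # describe() returning {'schedule_expression': 'rate(5 minutes)', ...}, every field in
+    # RULE_FIELDS must equal its snake_cased counterpart; any mismatch or missing key makes
+    # _sync_rule() re-put the rule.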
+
+ def _targets_to_put(self):
+ """Returns a list of targets that need to be updated or added remotely"""
+ remote_targets = self.rule.list_targets()
+ return [t for t in self.targets if t not in remote_targets]
+
+ def _remote_target_ids_to_remove(self):
+ """Returns a list of targets that need to be removed remotely"""
+ target_ids = [t['id'] for t in self.targets]
+ remote_targets = self.rule.list_targets()
+ return [
+ rt['id'] for rt in remote_targets if rt['id'] not in target_ids
+ ]
+
+ def _remote_state(self):
+ """Returns the remote state from AWS"""
+ description = self.rule.describe()
+ if not description:
+ return
+ return description['state']
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ schedule_expression=dict(),
+ event_pattern=dict(),
+ state=dict(choices=['present', 'disabled', 'absent'],
+ default='present'),
+ description=dict(),
+ role_arn=dict(),
+ targets=dict(type='list', default=[], elements='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ rule_data = dict(
+ [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
+ )
+ targets = module.params.get('targets')
+ state = module.params.get('state')
+ client = module.client('events')
+
+ cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
+ cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+ if state == 'present':
+ cwe_rule_manager.ensure_present()
+ elif state == 'disabled':
+ cwe_rule_manager.ensure_disabled()
+ elif state == 'absent':
+ cwe_rule_manager.ensure_absent()
+ else:
+ module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+ module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group.py
new file mode 100644
index 00000000..e8890988
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group
+version_added: 1.0.0
+short_description: create or delete log_group in CloudWatchLogs
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
+description:
+ - Create or delete log_group in CloudWatchLogs.
+author:
+ - Willian Ricardo (@willricardo) <willricardo@gmail.com>
+requirements: [ json, botocore, boto3 ]
+options:
+ state:
+ description:
+      - Whether the log group is present or absent.
+ choices: ["present", "absent"]
+ default: present
+ required: false
+ type: str
+ log_group_name:
+ description:
+ - The name of the log group.
+ required: true
+ type: str
+ kms_key_id:
+ description:
+ - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ required: false
+ type: str
+ tags:
+ description:
+ - The key-value pairs to use for the tags.
+ required: false
+ type: dict
+ retention:
+ description:
+ - The number of days to retain the log events in the specified log group.
+ - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
+ - Mutually exclusive with I(purge_retention_policy).
+ required: false
+ type: int
+ purge_retention_policy:
+ description:
+ - "Whether to purge the retention policy or not."
+ - "Mutually exclusive with I(retention) and I(overwrite)."
+ default: false
+ required: false
+ type: bool
+ overwrite:
+ description:
+ - Whether an existing log group should be overwritten on create.
+ - Mutually exclusive with I(purge_retention_policy).
+ default: false
+ required: false
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.cloudwatchlogs_log_group:
+ log_group_name: test-log-group
+
+- community.aws.cloudwatchlogs_log_group:
+ state: present
+ log_group_name: test-log-group
+ tags: { "Name": "test-log-group", "Env" : "QA" }
+
+- community.aws.cloudwatchlogs_log_group:
+ state: present
+ log_group_name: test-log-group
+ tags: { "Name": "test-log-group", "Env" : "QA" }
+ kms_key_id: arn:aws:kms:region:account-id:key/key-id
+
+- community.aws.cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: test-log-group
+
+'''
+
+RETURN = '''
+log_groups:
+ description: Return the list of complex objects representing log groups
+ returned: success
+ type: complex
+ contains:
+ log_group_name:
+ description: The name of the log group.
+ returned: always
+ type: str
+ creation_time:
+ description: The creation time of the log group.
+ returned: always
+ type: int
+ retention_in_days:
+ description: The number of days to retain the log events in the specified log group.
+ returned: always
+ type: int
+ metric_filter_count:
+ description: The number of metric filters.
+ returned: always
+ type: int
+ arn:
+ description: The Amazon Resource Name (ARN) of the log group.
+ returned: always
+ type: str
+ stored_bytes:
+ description: The number of bytes stored.
+ returned: always
+      type: int
+ kms_key_id:
+ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ returned: always
+ type: str
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
+ request = {'logGroupName': log_group_name}
+ if kms_key_id:
+ request['kmsKeyId'] = kms_key_id
+ if tags:
+ request['tags'] = tags
+
+ try:
+ client.create_log_group(**request)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ if retention:
+ input_retention_policy(client=client,
+ log_group_name=log_group_name,
+ retention=retention, module=module)
+
+ desc_log_group = describe_log_group(client=client,
+ log_group_name=log_group_name,
+ module=module)
+
+ if 'logGroups' in desc_log_group:
+ for i in desc_log_group['logGroups']:
+ if log_group_name == i['logGroupName']:
+ return i
+ module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
+
+
+def input_retention_policy(client, log_group_name, retention, module):
+ try:
+        permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
+
+        if retention in permitted_values:
+ response = client.put_retention_policy(logGroupName=log_group_name,
+ retentionInDays=retention)
+ else:
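+            # Note: an invalid retention value causes the module to delete the log group and
+            # then fail with the list of permitted values.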
+ delete_log_group(client=client, log_group_name=log_group_name, module=module)
+ module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def delete_retention_policy(client, log_group_name, module):
+ try:
+ client.delete_retention_policy(logGroupName=log_group_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def delete_log_group(client, log_group_name, module):
+ desc_log_group = describe_log_group(client=client,
+ log_group_name=log_group_name,
+ module=module)
+
+ try:
+ if 'logGroups' in desc_log_group:
+ for i in desc_log_group['logGroups']:
+ if log_group_name == i['logGroupName']:
+ client.delete_log_group(logGroupName=log_group_name)
+
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def describe_log_group(client, log_group_name, module):
+ try:
+ desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name)
+ return desc_log_group
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def main():
+ argument_spec = dict(
+ log_group_name=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'],
+ default='present'),
+ kms_key_id=dict(required=False, type='str'),
+ tags=dict(required=False, type='dict'),
+ retention=dict(required=False, type='int'),
+ purge_retention_policy=dict(required=False, type='bool', default=False),
+ overwrite=dict(required=False, type='bool', default=False),
+ )
+
+ mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']]
+ module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
+
+ try:
+ logs = module.client('logs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ changed = False
+
+ # Determine if the log group exists
+ desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
+ found_log_group = {}
+ for i in desc_log_group.get('logGroups', []):
+ if module.params['log_group_name'] == i['logGroupName']:
+ found_log_group = i
+ break
+
+ if state == 'present':
+ if found_log_group:
+ if module.params['overwrite'] is True:
+ changed = True
+ delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
+ found_log_group = create_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ kms_key_id=module.params['kms_key_id'],
+ tags=module.params['tags'],
+ retention=module.params['retention'],
+ module=module)
+ elif module.params['purge_retention_policy']:
+ if found_log_group.get('retentionInDays'):
+ changed = True
+ delete_retention_policy(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+ elif module.params['retention'] != found_log_group.get('retentionInDays'):
+ if module.params['retention'] is not None:
+ changed = True
+ input_retention_policy(client=logs,
+ log_group_name=module.params['log_group_name'],
+ retention=module.params['retention'],
+ module=module)
+ found_log_group['retentionInDays'] = module.params['retention']
+
+ elif not found_log_group:
+ changed = True
+ found_log_group = create_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ kms_key_id=module.params['kms_key_id'],
+ tags=module.params['tags'],
+ retention=module.params['retention'],
+ module=module)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
+
+ elif state == 'absent':
+ if found_log_group:
+ changed = True
+ delete_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_facts.py
new file mode 100644
index 00000000..153aac7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_info
+version_added: 1.0.0
+short_description: Get information about log groups in CloudWatch Logs
+description:
+ - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
+ - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Willian Ricardo (@willricardo) <willricardo@gmail.com>
+requirements: [ botocore, boto3 ]
+options:
+ log_group_name:
+ description:
+ - The name or prefix of the log group to filter by.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- community.aws.cloudwatchlogs_log_group_info:
+ log_group_name: test-log-group
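+
+# A minimal sketch: omitting log_group_name lists every log group, and the
+# result can be registered for later tasks (the names below are illustrative).
+- community.aws.cloudwatchlogs_log_group_info:
+  register: all_log_groups
+
+- ansible.builtin.debug:
+    msg: "{{ all_log_groups.log_groups | map(attribute='log_group_name') | list }}"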
+'''
+
+RETURN = '''
+log_groups:
+  description: Return the list of complex objects representing log groups.
+ returned: success
+ type: complex
+ contains:
+ log_group_name:
+ description: The name of the log group.
+ returned: always
+ type: str
+ creation_time:
+ description: The creation time of the log group.
+ returned: always
+ type: int
+ retention_in_days:
+ description: The number of days to retain the log events in the specified log group.
+ returned: always
+ type: int
+ metric_filter_count:
+ description: The number of metric filters.
+ returned: always
+ type: int
+ arn:
+ description: The Amazon Resource Name (ARN) of the log group.
+ returned: always
+ type: str
+ stored_bytes:
+ description: The number of bytes stored.
+ returned: always
+ type: str
+ kms_key_id:
+ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ returned: always
+ type: str
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def describe_log_group(client, log_group_name, module):
+ params = {}
+ if log_group_name:
+ params['logGroupNamePrefix'] = log_group_name
+ try:
+ paginator = client.get_paginator('describe_log_groups')
+ desc_log_group = paginator.paginate(**params).build_full_result()
+ return desc_log_group
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def main():
+ argument_spec = dict(
+ log_group_name=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'cloudwatchlogs_log_group_facts':
+ module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ logs = module.client('logs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ desc_log_group = describe_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+ final_log_group_snake = []
+
+ for log_group in desc_log_group['logGroups']:
+ final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
+
+ desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
+ module.exit_json(**desc_log_group_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_info.py
new file mode 100644
index 00000000..153aac7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_info.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_info
+version_added: 1.0.0
+short_description: Get information about log groups in CloudWatch Logs
+description:
+ - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
+ - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Willian Ricardo (@willricardo) <willricardo@gmail.com>
+requirements: [ botocore, boto3 ]
+options:
+ log_group_name:
+ description:
+ - The name or prefix of the log group to filter by.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- community.aws.cloudwatchlogs_log_group_info:
+ log_group_name: test-log-group
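+
+# A minimal sketch: omitting log_group_name lists every log group, and the
+# result can be registered for later tasks (the names below are illustrative).
+- community.aws.cloudwatchlogs_log_group_info:
+  register: all_log_groups
+
+- ansible.builtin.debug:
+    msg: "{{ all_log_groups.log_groups | map(attribute='log_group_name') | list }}"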
+'''
+
+RETURN = '''
+log_groups:
+  description: Return the list of complex objects representing log groups.
+ returned: success
+ type: complex
+ contains:
+ log_group_name:
+ description: The name of the log group.
+ returned: always
+ type: str
+ creation_time:
+ description: The creation time of the log group.
+ returned: always
+ type: int
+ retention_in_days:
+ description: The number of days to retain the log events in the specified log group.
+ returned: always
+ type: int
+ metric_filter_count:
+ description: The number of metric filters.
+ returned: always
+ type: int
+ arn:
+ description: The Amazon Resource Name (ARN) of the log group.
+ returned: always
+ type: str
+ stored_bytes:
+ description: The number of bytes stored.
+ returned: always
+ type: str
+ kms_key_id:
+ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ returned: always
+ type: str
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def describe_log_group(client, log_group_name, module):
+ params = {}
+ if log_group_name:
+ params['logGroupNamePrefix'] = log_group_name
+ try:
+ paginator = client.get_paginator('describe_log_groups')
+ desc_log_group = paginator.paginate(**params).build_full_result()
+ return desc_log_group
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+
+def main():
+ argument_spec = dict(
+ log_group_name=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'cloudwatchlogs_log_group_facts':
+ module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ logs = module.client('logs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ desc_log_group = describe_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+ final_log_group_snake = []
+
+ for log_group in desc_log_group['logGroups']:
+ final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
+
+ desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
+ module.exit_json(**desc_log_group_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
new file mode 100644
index 00000000..a05c7fe2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_metric_filter
+version_added: 1.0.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Manage CloudWatch log group metric filter
+description:
+ - Create, modify and delete CloudWatch log group metric filter.
+  - CloudWatch log group metric filter can be used with M(community.aws.ec2_metric_alarm).
+requirements:
+ - boto3
+ - botocore
+options:
+ state:
+ description:
+ - Whether the rule is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ log_group_name:
+ description:
+ - The name of the log group where the metric filter is applied on.
+ required: true
+ type: str
+ filter_name:
+ description:
+ - A name for the metric filter you create.
+ required: true
+ type: str
+ filter_pattern:
+ description:
+ - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present).
+ type: str
+ metric_transformation:
+ description:
+ - A collection of information that defines how metric data gets emitted. Required when I(state=present).
+ type: dict
+ suboptions:
+ metric_name:
+ description:
+          - The name of the CloudWatch metric.
+ type: str
+ metric_namespace:
+ description:
+          - The namespace of the CloudWatch metric.
+ type: str
+ metric_value:
+ description:
+          - The value to publish to the CloudWatch metric when a filter pattern matches a log event.
+ type: str
+ default_value:
+ description:
+ - The value to emit when a filter pattern does not match a log event.
+ type: float
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: set metric filter on log group /fluentd/testcase
+ community.aws.cloudwatchlogs_log_group_metric_filter:
+ log_group_name: /fluentd/testcase
+ filter_name: BoxFreeStorage
+ filter_pattern: '{($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: "$.value"
+
+- name: delete metric filter on log group /fluentd/testcase
+ community.aws.cloudwatchlogs_log_group_metric_filter:
+ log_group_name: /fluentd/testcase
+ filter_name: BoxFreeStorage
+ state: absent
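+
+# A usage sketch: the resulting transformations come back under
+# metric_filters, so the run can be registered and inspected.
+- name: set metric filter and inspect the result
+  community.aws.cloudwatchlogs_log_group_metric_filter:
+    log_group_name: /fluentd/testcase
+    filter_name: BoxFreeStorage
+    filter_pattern: '{($.value = *) && ($.hostname = "box")}'
+    state: present
+    metric_transformation:
+      metric_name: box_free_space
+      metric_namespace: fluentd_metrics
+      metric_value: "$.value"
+  register: metric_filter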
+'''
+
+RETURN = """
+metric_filters:
+  description: Return the original response value.
+ returned: success
+ type: list
+ sample: [
+ {
+ "default_value": 3.1415,
+ "metric_name": "box_free_space",
+ "metric_namespace": "made_with_ansible",
+ "metric_value": "$.value"
+ }
+ ]
+
+"""
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def metricTransformationHandler(metricTransformations, originMetricTransformations=None):
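+    """Build the metricTransformations payload for put_metric_filter and
+    report whether it differs from the transformation currently configured.
+
+    :returns: tuple of (metricTransformations list, changed bool)
+    """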
+
+ if originMetricTransformations:
+ change = False
+ originMetricTransformations = camel_dict_to_snake_dict(
+ originMetricTransformations)
+ for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]:
+ if metricTransformations.get(item) != originMetricTransformations.get(item):
+ change = True
+ else:
+ change = True
+
+ defaultValue = metricTransformations.get("default_value")
+    if isinstance(defaultValue, (int, float)):
+ retval = [
+ {
+ 'metricName': metricTransformations.get("metric_name"),
+ 'metricNamespace': metricTransformations.get("metric_namespace"),
+ 'metricValue': metricTransformations.get("metric_value"),
+ 'defaultValue': defaultValue
+ }
+ ]
+ else:
+ retval = [
+ {
+ 'metricName': metricTransformations.get("metric_name"),
+ 'metricNamespace': metricTransformations.get("metric_namespace"),
+ 'metricValue': metricTransformations.get("metric_value"),
+ }
+ ]
+
+ return retval, change
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ log_group_name=dict(type='str', required=True),
+ filter_name=dict(type='str', required=True),
+ filter_pattern=dict(type='str'),
+ metric_transformation=dict(type='dict', options=dict(
+ metric_name=dict(type='str'),
+ metric_namespace=dict(type='str'),
+ metric_value=dict(type='str'),
+ default_value=dict(type='float')
+ )),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])]
+ )
+
+ log_group_name = module.params.get("log_group_name")
+ filter_name = module.params.get("filter_name")
+ filter_pattern = module.params.get("filter_pattern")
+ metric_transformation = module.params.get("metric_transformation")
+ state = module.params.get("state")
+
+ cwl = module.client('logs')
+
+ # check if metric filter exists
+ response = cwl.describe_metric_filters(
+ logGroupName=log_group_name,
+ filterNamePrefix=filter_name
+ )
+
+ if len(response.get("metricFilters")) == 1:
+        originMetricTransformations = response["metricFilters"][0]["metricTransformations"][0]
+        originFilterPattern = response["metricFilters"][0]["filterPattern"]
+ else:
+ originMetricTransformations = None
+ originFilterPattern = None
+ change = False
+ metricTransformation = None
+
+ if state == "absent" and originMetricTransformations:
+ if not module.check_mode:
+ response = cwl.delete_metric_filter(
+ logGroupName=log_group_name,
+ filterName=filter_name
+ )
+ change = True
+ metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]]
+
+ elif state == "present":
+ metricTransformation, change = metricTransformationHandler(
+ metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations)
+
+ change = change or filter_pattern != originFilterPattern
+
+ if change:
+ if not module.check_mode:
+ response = cwl.put_metric_filter(
+ logGroupName=log_group_name,
+ filterName=filter_name,
+ filterPattern=filter_pattern,
+ metricTransformations=metricTransformation
+ )
+
+ metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation]
+
+ module.exit_json(changed=change, metric_filters=metricTransformation)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/data_pipeline.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/data_pipeline.py
new file mode 100644
index 00000000..2e49dcc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/data_pipeline.py
@@ -0,0 +1,638 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: data_pipeline
+version_added: 1.0.0
+author:
+ - Raghu Udiyar (@raags) <raghusiddarth@gmail.com>
+ - Sloane Hertel (@s-hertel) <shertel@redhat.com>
+requirements: [ "boto3" ]
+short_description: Create and manage AWS Datapipelines
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+description:
+ - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects)
+ given to the datapipeline.
+ - The pipeline definition must be in the format given here
+ U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax).
+ - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state.
+options:
+ name:
+ description:
+ - The name of the Datapipeline to create/modify/delete.
+ required: true
+ type: str
+ description:
+ description:
+ - An optional description for the pipeline being created.
+ default: ''
+ type: str
+ objects:
+ type: list
+ elements: dict
+ description:
+ - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields).
+ suboptions:
+ id:
+ description:
+ - The ID of the object.
+ type: str
+ name:
+ description:
+ - The name of the object.
+ type: str
+ fields:
+ description:
+ - Key-value pairs that define the properties of the object.
+ - The value is specified as a reference to another object I(refValue) or as a string value I(stringValue)
+ but not as both.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ type: str
+ description:
+ - The field identifier.
+ stringValue:
+ type: str
+ description:
+ - The field value.
+ - Exactly one of I(stringValue) and I(refValue) may be specified.
+ refValue:
+ type: str
+ description:
+ - The field value, expressed as the identifier of another object.
+ - Exactly one of I(stringValue) and I(refValue) may be specified.
+ parameters:
+ description:
+ - A list of parameter objects (dicts) in the pipeline definition.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - The ID of the parameter object.
+ attributes:
+ description:
+ - A list of attributes (dicts) of the parameter object.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The field identifier.
+ type: str
+ stringValue:
+ description: The field value.
+ type: str
+
+ values:
+ description:
+ - A list of parameter values (dicts) in the pipeline definition.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description: The ID of the parameter value
+ type: str
+ stringValue:
+ description: The field value
+ type: str
+ timeout:
+ description:
+ - Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise.
+ default: 300
+ type: int
+ state:
+ description:
+ - The requested state of the pipeline.
+ choices: ['present', 'absent', 'active', 'inactive']
+ default: present
+ type: str
+ tags:
+ description:
+ - A dict of key:value pair(s) to add to the pipeline.
+ type: dict
+ version:
+ description:
+ - The version option has never had any effect and will be removed after 2022-06-01.
+ type: str
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ objects: "{{pipelineObjects}}"
+ parameters: "{{pipelineParameters}}"
+ values: "{{pipelineValues}}"
+ tags:
+ key1: val1
+ key2: val2
+ state: present
+
+# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects
+- community.aws.data_pipeline:
+ name: test-dp
+ objects:
+ - "id": "DefaultSchedule"
+ "name": "Every 1 day"
+ "fields":
+ - "key": "period"
+ "stringValue": "1 days"
+ - "key": "type"
+ "stringValue": "Schedule"
+ - "key": "startAt"
+ "stringValue": "FIRST_ACTIVATION_DATE_TIME"
+ - "id": "Default"
+ "name": "Default"
+ "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
+ { "key": "role", "stringValue": "DataPipelineDefaultRole" },
+ { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
+ { "key": "scheduleType", "stringValue": "cron" },
+ { "key": "schedule", "refValue": "DefaultSchedule" },
+ { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
+ state: active
+
+# Activate pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ state: active
+
+# Delete pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ state: absent
+
+'''
+
+RETURN = r'''
+changed:
+  description: Whether the data pipeline has been modified.
+ type: bool
+ returned: always
+ sample:
+ changed: true
+result:
+ description:
+ - Contains the data pipeline data (data_pipeline) and a return message (msg).
+ If the data pipeline exists data_pipeline will contain the keys description, name,
+ pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then
+ data_pipeline will be an empty dict. The msg describes the status of the operation.
+ returned: always
+ type: dict
+'''
+
+import hashlib
+import json
+import time
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED']
+DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
+DP_ACTIVATING_STATE = 'ACTIVATING'
+DP_DEACTIVATING_STATE = 'DEACTIVATING'
+PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$'
+
+
+class DataPipelineNotFound(Exception):
+ pass
+
+
+class TimeOutException(Exception):
+ pass
+
+
+def pipeline_id(client, name):
+ """Return pipeline id for the given pipeline name
+
+ :param object client: boto3 datapipeline client
+ :param string name: pipeline name
+ :returns: pipeline id
+ :raises: DataPipelineNotFound
+
+ """
+ pipelines = client.list_pipelines()
+ for dp in pipelines['pipelineIdList']:
+ if dp['name'] == name:
+ return dp['id']
+ raise DataPipelineNotFound
+
+
+def pipeline_description(client, dp_id):
+ """Return pipeline description list
+
+ :param object client: boto3 datapipeline client
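+    :param string dp_id: pipeline id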
+ :returns: pipeline description dictionary
+ :raises: DataPipelineNotFound
+
+ """
+ try:
+ return client.describe_pipelines(pipelineIds=[dp_id])
+    except ClientError:
+        raise DataPipelineNotFound
+
+
+def pipeline_field(client, dp_id, field):
+ """Return a pipeline field from the pipeline description.
+
+ The available fields are listed in describe_pipelines output.
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :param string field: pipeline description field
+ :returns: pipeline field information
+
+ """
+ dp_description = pipeline_description(client, dp_id)
+ for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
+ if field_key['key'] == field:
+ return field_key['stringValue']
+ raise KeyError("Field key {0} not found!".format(field))
+
+
+def run_with_timeout(timeout, func, *func_args, **func_kwargs):
+ """Run func with the provided args and kwargs, and wait until
+ timeout for truthy return value
+
+ :param int timeout: time to wait for status
+ :param function func: function to run, should return True or False
+ :param args func_args: function args to pass to func
+ :param kwargs func_kwargs: function key word args
+ :returns: True if func returns truthy within timeout
+ :raises: TimeOutException
+
+ """
+
+ for count in range(timeout // 10):
+ if func(*func_args, **func_kwargs):
+ return True
+ else:
+ # check every 10s
+ time.sleep(10)
+
+ raise TimeOutException
+
+
+def check_dp_exists(client, dp_id):
+ """Check if datapipeline exists
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :returns: True or False
+
+ """
+ try:
+ # pipeline_description raises DataPipelineNotFound
+ if pipeline_description(client, dp_id):
+ return True
+ else:
+ return False
+ except DataPipelineNotFound:
+ return False
+
+
+def check_dp_status(client, dp_id, status):
+ """Checks if datapipeline matches states in status list
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :param list status: list of states to check against
+ :returns: True or False
+
+ """
+ if not isinstance(status, list):
+ raise AssertionError()
+ if pipeline_field(client, dp_id, field="@pipelineState") in status:
+ return True
+ else:
+ return False
+
+
+def pipeline_status_timeout(client, dp_id, status, timeout):
+ args = (client, dp_id, status)
+ return run_with_timeout(timeout, check_dp_status, *args)
+
+
+def pipeline_exists_timeout(client, dp_id, timeout):
+ args = (client, dp_id)
+ return run_with_timeout(timeout, check_dp_exists, *args)
+
+
+def activate_pipeline(client, module):
+ """Activates pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ except DataPipelineNotFound:
+ module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+ if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
+ changed = False
+ else:
+ try:
+ client.activate_pipeline(pipelineId=dp_id)
+ except ClientError as e:
+ if e.response["Error"]["Code"] == "InvalidRequestException":
+ module.fail_json(msg="You need to populate your pipeline before activation.")
+ try:
+ pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
+ timeout=timeout)
+ except TimeOutException:
+ if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+ # activated but completed more rapidly than it was checked
+ pass
+ else:
+ module.fail_json(msg=('Data Pipeline {0} failed to activate '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ changed = True
+
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
+
+ return (changed, result)
+
+
+def deactivate_pipeline(client, module):
+ """Deactivates pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ except DataPipelineNotFound:
+ module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+ if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
+ changed = False
+ else:
+ client.deactivate_pipeline(pipelineId=dp_id)
+ try:
+ pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
+ timeout=timeout)
+ except TimeOutException:
+            module.fail_json(msg=('Data Pipeline {0} failed to deactivate '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ changed = True
+
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
+
+ return (changed, result)
+
+
+def _delete_dp_with_check(dp_id, client, timeout):
+ client.delete_pipeline(pipelineId=dp_id)
+ try:
+ pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout)
+ except DataPipelineNotFound:
+ return True
+
+
+def delete_pipeline(client, module):
+ """Deletes pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ _delete_dp_with_check(dp_id, client, timeout)
+ changed = True
+ except DataPipelineNotFound:
+ changed = False
+ except TimeOutException:
+        module.fail_json(msg=('Data Pipeline {0} failed to delete '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ result = {'data_pipeline': {},
+ 'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
+
+ return (changed, result)
+
+
+def build_unique_id(module):
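+    """Derive a stable uniqueId by hashing the module params (minus objects and timeout)."""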
+ data = dict(module.params)
+ # removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline
+ [data.pop(each, None) for each in ('objects', 'timeout')]
+ json_data = json.dumps(data, sort_keys=True).encode("utf-8")
+ hashed_data = hashlib.md5(json_data).hexdigest()
+ return hashed_data
+
+
+def format_tags(tags):
+ """ Reformats tags
+
+ :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
+ :returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
+
+ """
+ return [dict(key=k, value=v) for k, v in tags.items()]
+
+
+def get_result(client, dp_id):
+ """ Get the current state of the data pipeline and reformat it to snake_case for exit_json
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :returns: reformatted dict of pipeline description
+
+ """
+ # pipeline_description returns a pipelineDescriptionList of length 1
+ # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict)
+ dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
+
+ # Get uniqueId and pipelineState in fields to add to the exit_json result
+ dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
+ dp["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState")
+
+ # Remove fields; can't make a list snake_case and most of the data is redundant
+ del dp["fields"]
+
+ # Note: tags is already formatted fine so we don't need to do anything with it
+
+ # Reformat data pipeline and add reformatted fields back
+ dp = camel_dict_to_snake_dict(dp)
+ return dp
+
+
+def diff_pipeline(client, module, objects, unique_id, dp_name):
+ """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated
+ """
+ result = {}
+ changed = False
+ create_dp = False
+
+ # See if there is already a pipeline with the same unique_id
+ unique_id = build_unique_id(module)
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ dp_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId"))
+ if dp_unique_id != unique_id:
+ # A change is expected but not determined. Updated to a bool in create_pipeline().
+ changed = "NEW_VERSION"
+ create_dp = True
+ # Unique ids are the same - check if pipeline needs modification
+ else:
+ dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
+ # Definition needs to be updated
+ if dp_objects != objects:
+ changed, msg = define_pipeline(client, module, objects, dp_id)
+ # No changes
+ else:
+ msg = 'Data Pipeline {0} is present'.format(dp_name)
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': msg}
+ except DataPipelineNotFound:
+ create_dp = True
+
+ return create_dp, changed, result
+
+
+def define_pipeline(client, module, objects, dp_id):
+ """Puts pipeline definition
+
+ """
+ dp_name = module.params.get('name')
+
+ if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+ msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
+ changed = False
+
+ elif objects:
+ parameters = module.params.get('parameters')
+ values = module.params.get('values')
+
+ try:
+ client.put_pipeline_definition(pipelineId=dp_id,
+ pipelineObjects=objects,
+ parameterObjects=parameters,
+ parameterValues=values)
+ msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
+ changed = True
+ except ClientError as e:
+ module.fail_json(msg="Failed to put the definition for pipeline {0}. Check that string/reference fields"
+ "are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
+ "objects".format(dp_name), exception=traceback.format_exc())
+ else:
+ changed = False
+ msg = ""
+
+ return changed, msg
+
+
+def create_pipeline(client, module):
+ """Creates datapipeline. Uses uniqueId to achieve idempotency.
+
+ """
+ dp_name = module.params.get('name')
+ objects = module.params.get('objects', None)
+ description = module.params.get('description', '')
+ tags = module.params.get('tags')
+ timeout = module.params.get('timeout')
+
+ unique_id = build_unique_id(module)
+ create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
+
+ if changed == "NEW_VERSION":
+ # delete old version
+ changed, creation_result = delete_pipeline(client, module)
+
+ # There isn't a pipeline or it has different parameters than the pipeline in existence.
+ if create_dp:
+ # Make pipeline
+ try:
+ tags = format_tags(tags)
+ dp = client.create_pipeline(name=dp_name,
+ uniqueId=unique_id,
+ description=description,
+ tags=tags)
+ dp_id = dp['pipelineId']
+ pipeline_exists_timeout(client, dp_id, timeout)
+ except ClientError as e:
+ module.fail_json(msg="Failed to create the data pipeline {0}.".format(dp_name), exception=traceback.format_exc())
+ except TimeOutException:
+        module.fail_json(msg=('Data Pipeline {0} failed to create '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ # Put pipeline definition
+ changed, msg = define_pipeline(client, module, objects, dp_id)
+
+ changed = True
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
+
+ return (changed, result)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ description=dict(required=False, default=''),
+ objects=dict(required=False, type='list', default=[], elements='dict'),
+ parameters=dict(required=False, type='list', default=[], elements='dict'),
+ timeout=dict(required=False, type='int', default=300),
+ state=dict(default='present', choices=['present', 'absent',
+ 'active', 'inactive']),
+ tags=dict(required=False, type='dict', default={}),
+ values=dict(required=False, type='list', default=[], elements='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ try:
+ client = module.client('datapipeline')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ if state == 'present':
+ changed, result = create_pipeline(client, module)
+ elif state == 'absent':
+ changed, result = delete_pipeline(client, module)
+ elif state == 'active':
+ changed, result = activate_pipeline(client, module)
+ elif state == 'inactive':
+ changed, result = deactivate_pipeline(client, module)
+
+ module.exit_json(result=result, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_endpoint.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
new file mode 100644
index 00000000..829aae27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dms_endpoint
+version_added: 1.0.0
+short_description: Creates or destroys a data migration services endpoint
+description:
+ - Creates or destroys a data migration services endpoint,
+ that can be used to replicate data.
+options:
+ state:
+ description:
+ - State of the endpoint.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ endpointidentifier:
+ description:
+ - An identifier name for the endpoint.
+ type: str
+ required: true
+ endpointtype:
+ description:
+ - Type of endpoint we want to manage.
+ choices: ['source', 'target']
+ type: str
+ required: true
+ enginename:
+ description:
+      - Database engine that we want to use; please refer to
+        the AWS DMS documentation for more information on the supported
+        engines and their limitations.
+ choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
+ 'redshift', 's3', 'db2', 'azuredb', 'sybase',
+ 'dynamodb', 'mongodb', 'sqlserver']
+ type: str
+ required: true
+ username:
+ description:
+ - Username our endpoint will use to connect to the database.
+ type: str
+ password:
+ description:
+      - Password used to connect to the database.
+      - This attribute can only be written; the AWS API does not return this parameter.
+ type: str
+ servername:
+ description:
+ - Servername that the endpoint will connect to.
+ type: str
+ port:
+ description:
+ - TCP port for access to the database.
+ type: int
+ databasename:
+ description:
+ - Name for the database on the origin or target side.
+ type: str
+ extraconnectionattributes:
+ description:
+      - Extra attributes for the database connection. The AWS documentation
+        states "For more information about extra connection attributes,
+        see the documentation section for your data store."
+ type: str
+ kmskeyid:
+ description:
+ - Encryption key to use to encrypt replication storage and
+ connection information.
+ type: str
+ tags:
+ description:
+      - A dictionary of tags to add to the endpoint.
+ type: dict
+ certificatearn:
+ description:
+ - Amazon Resource Name (ARN) for the certificate.
+ type: str
+ sslmode:
+ description:
+ - Mode used for the SSL connection.
+ default: none
+ choices: ['none', 'require', 'verify-ca', 'verify-full']
+ type: str
+ serviceaccessrolearn:
+ description:
+ - Amazon Resource Name (ARN) for the service access role that you
+ want to use to create the endpoint.
+ type: str
+ externaltabledefinition:
+ description:
+ - The external table definition.
+ type: str
+ dynamodbsettings:
+ description:
+ - Settings in JSON format for the target Amazon DynamoDB endpoint
+ if source or target is dynamodb.
+ type: dict
+ s3settings:
+ description:
+ - S3 buckets settings for the target Amazon S3 endpoint.
+ type: dict
+ dmstransfersettings:
+ description:
+ - The settings in JSON format for the DMS transfer type of
+ source endpoint.
+ type: dict
+ mongodbsettings:
+ description:
+ - Settings in JSON format for the source MongoDB endpoint.
+ type: dict
+ kinesissettings:
+ description:
+ - Settings in JSON format for the target Amazon Kinesis
+ Data Streams endpoint.
+ type: dict
+ elasticsearchsettings:
+ description:
+ - Settings in JSON format for the target Elasticsearch endpoint.
+ type: dict
+ wait:
+ description:
+ - Whether Ansible should wait for the object to be deleted when I(state=absent).
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Time in seconds we should wait for when deleting a resource.
+ - Required when I(wait=true).
+ type: int
+ retries:
+ description:
+      - Number of times we should retry when deleting a resource.
+ - Required when I(wait=true).
+ type: int
+author:
+ - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details
+- name: Endpoint Creation
+ community.aws.dms_endpoint:
+    state: present
+ endpointidentifier: 'testsource'
+ endpointtype: source
+ enginename: aurora
+ username: testing1
+ password: testint1234
+ servername: testing.domain.com
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ wait: false
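+
+# A deletion sketch: state=absent requires wait, and wait=true in turn
+# requires timeout and retries (the values below are illustrative).
+- name: Endpoint Deletion
+  community.aws.dms_endpoint:
+    state: absent
+    endpointidentifier: 'testsource'
+    endpointtype: source
+    enginename: aurora
+    wait: true
+    timeout: 60
+    retries: 10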
+'''
+
+RETURN = ''' # '''
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
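+# Shared retry settings for the AWSRetry.backoff decorators below.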
+backoff_params = dict(tries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_endpoints(connection, endpoint_identifier):
+ """ checks if the endpoint exists """
+ try:
+ endpoint_filter = dict(Name='endpoint-id',
+ Values=[endpoint_identifier])
+ return connection.describe_endpoints(Filters=[endpoint_filter])
+ except botocore.exceptions.ClientError:
+ return {'Endpoints': []}
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_delete_endpoint(client, **params):
+ """deletes the DMS endpoint based on the EndpointArn"""
+ if module.params.get('wait'):
+ return delete_dms_endpoint(client)
+ else:
+ return client.delete_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_create_endpoint(client, **params):
+ """ creates the DMS endpoint"""
+ return client.create_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_modify_endpoint(client, **params):
+ """ updates the endpoint"""
+ return client.modify_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def get_endpoint_deleted_waiter(client):
+ return client.get_waiter('endpoint_deleted')
+
+
+def endpoint_exists(endpoint):
+ """ Returns boolean based on the existence of the endpoint
+ :param endpoint: dict containing the described endpoint
+ :return: bool
+ """
+    return bool(endpoint['Endpoints'])
+
+
+def delete_dms_endpoint(connection):
+ try:
+ endpoint = describe_endpoints(connection,
+ module.params.get('endpointidentifier'))
+ endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
+ delete_arn = dict(
+ EndpointArn=endpoint_arn
+ )
+ if module.params.get('wait'):
+
+ delete_output = connection.delete_endpoint(**delete_arn)
+ delete_waiter = get_endpoint_deleted_waiter(connection)
+ delete_waiter.wait(
+ Filters=[{
+ 'Name': 'endpoint-arn',
+ 'Values': [endpoint_arn]
+
+ }],
+ WaiterConfig={
+ 'Delay': module.params.get('timeout'),
+ 'MaxAttempts': module.params.get('retries')
+ }
+ )
+ return delete_output
+ else:
+ return connection.delete_endpoint(**delete_arn)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to delete the DMS endpoint.",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to delete the DMS endpoint.",
+ exception=traceback.format_exc())
+
+
+def create_module_params():
+ """
+ Reads the module parameters and returns a dict
+ :return: dict
+ """
+ endpoint_parameters = dict(
+ EndpointIdentifier=module.params.get('endpointidentifier'),
+ EndpointType=module.params.get('endpointtype'),
+ EngineName=module.params.get('enginename'),
+ Username=module.params.get('username'),
+ Password=module.params.get('password'),
+ ServerName=module.params.get('servername'),
+ Port=module.params.get('port'),
+ DatabaseName=module.params.get('databasename'),
+ SslMode=module.params.get('sslmode')
+ )
+ if module.params.get('EndpointArn'):
+ endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
+ if module.params.get('certificatearn'):
+ endpoint_parameters['CertificateArn'] = \
+ module.params.get('certificatearn')
+
+ if module.params.get('dmstransfersettings'):
+ endpoint_parameters['DmsTransferSettings'] = \
+ module.params.get('dmstransfersettings')
+
+ if module.params.get('extraconnectionattributes'):
+ endpoint_parameters['ExtraConnectionAttributes'] =\
+ module.params.get('extraconnectionattributes')
+
+ if module.params.get('kmskeyid'):
+ endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
+
+ if module.params.get('tags'):
+ endpoint_parameters['Tags'] = module.params.get('tags')
+
+ if module.params.get('serviceaccessrolearn'):
+ endpoint_parameters['ServiceAccessRoleArn'] = \
+ module.params.get('serviceaccessrolearn')
+
+ if module.params.get('externaltabledefinition'):
+ endpoint_parameters['ExternalTableDefinition'] = \
+ module.params.get('externaltabledefinition')
+
+ if module.params.get('dynamodbsettings'):
+ endpoint_parameters['DynamoDbSettings'] = \
+ module.params.get('dynamodbsettings')
+
+ if module.params.get('s3settings'):
+ endpoint_parameters['S3Settings'] = module.params.get('s3settings')
+
+ if module.params.get('mongodbsettings'):
+ endpoint_parameters['MongoDbSettings'] = \
+ module.params.get('mongodbsettings')
+
+ if module.params.get('kinesissettings'):
+ endpoint_parameters['KinesisSettings'] = \
+ module.params.get('kinesissettings')
+
+ if module.params.get('elasticsearchsettings'):
+ endpoint_parameters['ElasticsearchSettings'] = \
+ module.params.get('elasticsearchsettings')
+
+    # wait, timeout and retries are module-level options rather than API
+    # parameters, so they are deliberately left out of the payload.
+
+ return endpoint_parameters
+
+
+def compare_params(param_described):
+ """
+    Compares the dict obtained from the describe DMS endpoint call with
+    the values built from the module parameters. We can never compare the
+    password, as boto3's method for describing a DMS endpoint does not
+    return its value for security reasons (I assume).
+    """
+    modparams = create_module_params()
+    changed = False
+    for paramname in modparams:
+        if paramname == 'Password':
+            # The API never returns the password, so it cannot be compared.
+            continue
+        if paramname in param_described and \
+                (param_described[paramname] == modparams[paramname] or
+                 str(param_described[paramname]).lower() == modparams[paramname]):
+            continue
+        changed = True
+
+
+def modify_dms_endpoint(connection):
+
+ try:
+ params = create_module_params()
+ return dms_modify_endpoint(connection, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to update DMS endpoint.",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to update DMS endpoint.",
+ exception=traceback.format_exc())
+
+
+def create_dms_endpoint(connection):
+ """
+ Function to create the dms endpoint
+ :param connection: boto3 aws connection
+ :return: information about the dms endpoint object
+ """
+
+ try:
+ params = create_module_params()
+ return dms_create_endpoint(connection, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to create DMS endpoint.",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to create DMS endpoint.",
+ exception=traceback.format_exc())
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ endpointidentifier=dict(required=True),
+ endpointtype=dict(choices=['source', 'target'], required=True),
+ enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
+ 'aurora', 'redshift', 's3', 'db2', 'azuredb',
+ 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
+ required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ servername=dict(),
+ port=dict(type='int'),
+ databasename=dict(),
+ extraconnectionattributes=dict(),
+ kmskeyid=dict(),
+ tags=dict(type='dict'),
+ certificatearn=dict(),
+ sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
+ default='none'),
+ serviceaccessrolearn=dict(),
+ externaltabledefinition=dict(),
+ dynamodbsettings=dict(type='dict'),
+ s3settings=dict(type='dict'),
+ dmstransfersettings=dict(type='dict'),
+ mongodbsettings=dict(type='dict'),
+ kinesissettings=dict(type='dict'),
+ elasticsearchsettings=dict(type='dict'),
+ wait=dict(type='bool', default=False),
+ timeout=dict(type='int'),
+ retries=dict(type='int')
+ )
+ global module
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ["state", "absent", ["wait"]],
+ ["wait", "True", ["timeout"]],
+ ["wait", "True", ["retries"]],
+ ],
+ supports_check_mode=False
+ )
+ exit_message = None
+ changed = False
+
+ state = module.params.get('state')
+
+ dmsclient = module.client('dms')
+ endpoint = describe_endpoints(dmsclient,
+ module.params.get('endpointidentifier'))
+ if state == 'present':
+ if endpoint_exists(endpoint):
+ module.params['EndpointArn'] = \
+ endpoint['Endpoints'][0].get('EndpointArn')
+ params_changed = compare_params(endpoint["Endpoints"][0])
+ if params_changed:
+ updated_dms = modify_dms_endpoint(dmsclient)
+ exit_message = updated_dms
+ changed = True
+ else:
+ module.exit_json(changed=False, msg="Endpoint Already Exists")
+ else:
+ dms_properties = create_dms_endpoint(dmsclient)
+ exit_message = dms_properties
+ changed = True
+ elif state == 'absent':
+ if endpoint_exists(endpoint):
+ delete_results = delete_dms_endpoint(dmsclient)
+ exit_message = delete_results
+ changed = True
+ else:
+ changed = False
+ exit_message = 'DMS Endpoint does not exist'
+
+ module.exit_json(changed=changed, msg=exit_message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
new file mode 100644
index 00000000..5aa633b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dms_replication_subnet_group
+version_added: 1.0.0
+short_description: Creates or destroys a data migration services subnet group
+description:
+ - Creates or destroys a data migration services subnet group.
+options:
+ state:
+ description:
+ - State of the subnet group.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ identifier:
+ description:
+ - The name for the replication subnet group.
+ This value is stored as a lowercase string.
+ Must contain no more than 255 alphanumeric characters,
+ periods, spaces, underscores, or hyphens. Must not be "default".
+ type: str
+ required: true
+ description:
+ description:
+ - The description for the subnet group.
+ type: str
+ required: true
+ subnet_ids:
+ description:
+      - A list containing the subnet IDs for the replication subnet group;
+        at least two subnets must be provided.
+ type: list
+ elements: str
+ required: true
+author:
+ - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- community.aws.dms_replication_subnet_group:
+ state: present
+ identifier: "dev-sngroup"
+ description: "Development Subnet Group asdasdas"
+ subnet_ids: ['subnet-id1','subnet-id2']
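+
+# A teardown sketch: identifier, description and subnet_ids are all
+# required by the module, even when state is absent.
+- community.aws.dms_replication_subnet_group:
+    state: absent
+    identifier: "dev-sngroup"
+    description: "Development Subnet Group"
+    subnet_ids: ['subnet-id1','subnet-id2']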
+'''
+
+RETURN = ''' # '''
+
+import traceback
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+backoff_params = dict(tries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_subnet_group(connection, subnet_group):
+ """checks if instance exists"""
+ try:
+ subnet_group_filter = dict(Name='replication-subnet-group-id',
+ Values=[subnet_group])
+ return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter])
+ except botocore.exceptions.ClientError:
+ return {'ReplicationSubnetGroups': []}
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_create(connection, **params):
+ """ creates the replication subnet group """
+ return connection.create_replication_subnet_group(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_modify(connection, **modify_params):
+ return connection.modify_replication_subnet_group(**modify_params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_delete(module, connection):
+ subnetid = module.params.get('identifier')
+ delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid)
+ return connection.delete_replication_subnet_group(**delete_parameters)
+
+
+def replication_subnet_exists(subnet):
+ """ Returns boolean based on the existence of the endpoint
+ :param endpoint: dict containing the described endpoint
+ :return: bool
+ """
+    return bool(subnet['ReplicationSubnetGroups'])
+
+
+def create_module_params(module):
+ """
+ Reads the module parameters and returns a dict
+ :return: dict
+ """
+ instance_parameters = dict(
+ # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
+ ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
+ ReplicationSubnetGroupDescription=module.params.get('description'),
+ SubnetIds=module.params.get('subnet_ids'),
+ )
+
+ return instance_parameters
+
+
+def compare_params(module, param_described):
+ """
+    Compares the dict obtained from the describe call with the values
+    built from the module parameters, after sanitizing the read-only keys
+    the API adds to its response.
+ """
+ modparams = create_module_params(module)
+ changed = False
+ # need to sanitize values that get returned from the API
+ if 'VpcId' in param_described.keys():
+ param_described.pop('VpcId')
+ if 'SubnetGroupStatus' in param_described.keys():
+ param_described.pop('SubnetGroupStatus')
+    for paramname in modparams.keys():
+        if paramname == 'SubnetIds':
+            # The API returns the group's subnets as dicts under 'Subnets'.
+            subnets = [subnet.get('SubnetIdentifier')
+                       for subnet in param_described.get('Subnets', [])]
+            for modulesubnet in modparams['SubnetIds']:
+                if modulesubnet not in subnets:
+                    changed = True
+        elif param_described.get(paramname) != modparams[paramname]:
+            changed = True
+    return changed
+
+
+def create_replication_subnet_group(module, connection):
+ try:
+ params = create_module_params(module)
+ return replication_subnet_group_create(connection, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to create DMS replication subnet group.",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to create DMS replication subnet group.",
+ exception=traceback.format_exc())
+
+
+def modify_replication_subnet_group(module, connection):
+ try:
+ modify_params = create_module_params(module)
+ return replication_subnet_group_modify(connection, **modify_params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
+ exception=traceback.format_exc())
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ identifier=dict(type='str', required=True),
+ description=dict(type='str', required=True),
+ subnet_ids=dict(type='list', elements='str', required=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ exit_message = None
+ changed = False
+
+ state = module.params.get('state')
+ dmsclient = module.client('dms')
+ subnet_group = describe_subnet_group(dmsclient,
+ module.params.get('identifier'))
+ if state == 'present':
+ if replication_subnet_exists(subnet_group):
+ if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
+ if not module.check_mode:
+ exit_message = modify_replication_subnet_group(module, dmsclient)
+ else:
+                exit_message = "Check mode enabled"
+ changed = True
+ else:
+ exit_message = "No changes to Subnet group"
+ else:
+ if not module.check_mode:
+ exit_message = create_replication_subnet_group(module, dmsclient)
+ changed = True
+ else:
+ exit_message = "Check mode enabled"
+
+ elif state == 'absent':
+ if replication_subnet_exists(subnet_group):
+ if not module.check_mode:
+ replication_subnet_group_delete(module, dmsclient)
+ changed = True
+ exit_message = "Replication subnet group Deleted"
+ else:
+            exit_message = "Check mode enabled"
+ changed = True
+
+ else:
+ changed = False
+ exit_message = "Replication subnet group does not exist"
+
+ module.exit_json(changed=changed, msg=exit_message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_table.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
new file mode 100644
index 00000000..35d9cd4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dynamodb_table
+version_added: 1.0.0
+short_description: Create, update or delete AWS DynamoDB tables
+description:
+  - Create or delete AWS DynamoDB tables.
+ - Can update the provisioned throughput on existing tables.
+ - Returns the status of the specified table.
+author: Alan Loi (@loia)
+requirements:
+ - "boto >= 2.37.0"
+ - "boto3 >= 1.4.4 (for tagging)"
+options:
+ state:
+ description:
+ - Create or delete the table.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the table.
+ required: true
+ type: str
+ hash_key_name:
+ description:
+ - Name of the hash key.
+ - Required when C(state=present).
+ type: str
+ hash_key_type:
+ description:
+ - Type of the hash key.
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ type: str
+ range_key_name:
+ description:
+ - Name of the range key.
+ type: str
+ range_key_type:
+ description:
+ - Type of the range key.
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ type: str
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision.
+ default: 1
+ type: int
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision.
+ default: 1
+ type: int
+ indexes:
+ description:
+      - List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes cannot be updated and do not have separate throughput.
+ - "required options: ['name', 'type', 'hash_key_name']"
+ - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
+ suboptions:
+ name:
+ description: The name of the index.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of index.
+ - "Valid types: C(all), C(global_all), C(global_include), C(global_keys_only), C(include), C(keys_only)"
+ type: str
+ required: true
+ hash_key_name:
+ description: The name of the hash-based key.
+ required: true
+ type: str
+ hash_key_type:
+ description: The type of the hash-based key.
+ type: str
+ range_key_name:
+ description: The name of the range-based key.
+ type: str
+ range_key_type:
+ type: str
+ description: The type of the range-based key.
+ includes:
+ type: list
+ description: A list of fields to include when using C(global_include) or C(include) indexes.
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision for the index.
+ type: int
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision for the index.
+ type: int
+ default: []
+ type: list
+ elements: dict
+ tags:
+ description:
+      - A hash/dictionary of tags to apply to the table.
+ - 'For example: C({"key":"value"}) and C({"key":"value","key2":"value2"})'
+ type: dict
+ wait_for_active_timeout:
+ description:
+      - How long before wait gives up, in seconds. Only used when I(tags) is set.
+ default: 60
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Create dynamo table with hash and range primary key
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ range_key_name: create_time
+ range_key_type: NUMBER
+ read_capacity: 2
+ write_capacity: 2
+ tags:
+ tag_name: tag_value
+
+- name: Update capacity on existing dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ read_capacity: 10
+ write_capacity: 10
+
+- name: Set index on existing dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ indexes:
+ - name: NamedIndex
+ type: global_include
+ hash_key_name: id
+ range_key_name: create_time
+ includes:
+ - other_field
+ - other_field2
+ read_capacity: 10
+ write_capacity: 10
+
+- name: Delete dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ state: absent
+'''
+
+RETURN = r'''
+table_status:
+ description: The current status of the table.
+ returned: success
+ type: str
+ sample: ACTIVE
+'''
+
+import time
+import traceback
+
+try:
+ import boto
+ import boto.dynamodb2
+ from boto.dynamodb2.table import Table
+ from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
+ from boto.dynamodb2.types import STRING, NUMBER, BINARY
+ from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
+ from boto.dynamodb2.exceptions import ValidationException
+ DYNAMO_TYPE_MAP = {
+ 'STRING': STRING,
+ 'NUMBER': NUMBER,
+ 'BINARY': BINARY
+ }
+ # Boto 2 is mandatory, Boto3 is only needed for tagging
+ import botocore
+except ImportError:
+ pass # Handled by ec2.HAS_BOTO and ec2.HAS_BOTO3
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+
+DYNAMO_TYPE_DEFAULT = 'STRING'
+INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
+INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
+INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
+
+
+def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None):
+ table_name = module.params.get('name')
+ hash_key_name = module.params.get('hash_key_name')
+ hash_key_type = module.params.get('hash_key_type')
+ range_key_name = module.params.get('range_key_name')
+ range_key_type = module.params.get('range_key_type')
+ read_capacity = module.params.get('read_capacity')
+ write_capacity = module.params.get('write_capacity')
+ all_indexes = module.params.get('indexes')
+ tags = module.params.get('tags')
+ wait_for_active_timeout = module.params.get('wait_for_active_timeout')
+
+ for index in all_indexes:
+ validate_index(index, module)
+
+ schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)
+
+ throughput = {
+ 'read': read_capacity,
+ 'write': write_capacity
+ }
+
+ indexes, global_indexes = get_indexes(all_indexes)
+
+ result = dict(
+ region=region,
+ table_name=table_name,
+ hash_key_name=hash_key_name,
+ hash_key_type=hash_key_type,
+ range_key_name=range_key_name,
+ range_key_type=range_key_type,
+ read_capacity=read_capacity,
+ write_capacity=write_capacity,
+ indexes=all_indexes,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+ if dynamo_table_exists(table):
+ result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
+ else:
+ if not module.check_mode:
+ Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
+ result['changed'] = True
+
+ if not module.check_mode:
+ result['table_status'] = table.describe()['Table']['TableStatus']
+
+ if tags:
+ # only tables which are active can be tagged
+ wait_until_table_active(module, table, wait_for_active_timeout)
+ account_id = get_account_id(boto3_sts)
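+                # Builds an ARN of the form (illustrative region/account):
+                #   arn:aws:dynamodb:us-east-1:123456789012:table/my-table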
+ boto3_dynamodb.tag_resource(
+ ResourceArn='arn:aws:dynamodb:' +
+ region +
+ ':' +
+ account_id +
+ ':table/' +
+ table_name,
+ Tags=ansible_dict_to_boto3_tag_list(tags))
+ result['tags'] = tags
+
+ except BotoServerError:
+ result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def get_account_id(boto3_sts):
+ return boto3_sts.get_caller_identity()["Account"]
+
+
+def wait_until_table_active(module, table, wait_timeout):
+ max_wait_time = time.time() + wait_timeout
+ while (max_wait_time > time.time()) and (table.describe()['Table']['TableStatus'] != 'ACTIVE'):
+ time.sleep(5)
+ if max_wait_time <= time.time():
+ # waiting took too long
+        module.fail_json(msg="timed out waiting for table to become active")
+
+
+def delete_dynamo_table(connection, module):
+ table_name = module.params.get('name')
+
+ result = dict(
+ region=module.params.get('region'),
+ table_name=table_name,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+ if dynamo_table_exists(table):
+ if not module.check_mode:
+ table.delete()
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ except BotoServerError:
+ result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def dynamo_table_exists(table):
+ try:
+ table.describe()
+ return True
+
+ except JSONResponseError as e:
+ if e.message and e.message.startswith('Requested resource not found'):
+ return False
+ else:
+ raise e
+
+
+def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
+ table.describe() # populate table details
+ throughput_changed = False
+ global_indexes_changed = False
+ if has_throughput_changed(table, throughput):
+ if not check_mode:
+ throughput_changed = table.update(throughput=throughput)
+ else:
+ throughput_changed = True
+
+ removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
+ if removed_indexes:
+ if not check_mode:
+ for name, index in removed_indexes.items():
+ global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
+ else:
+ global_indexes_changed = True
+
+ if added_indexes:
+ if not check_mode:
+ for name, index in added_indexes.items():
+ global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
+ else:
+ global_indexes_changed = True
+
+ if index_throughput_changes:
+ if not check_mode:
+ # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
+ try:
+ global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
+ except ValidationException:
+ pass
+ else:
+ global_indexes_changed = True
+
+ return throughput_changed or global_indexes_changed
+
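+# update_dynamo_table() reports a change when either the table throughput or
+# any global secondary index was added, removed or resized; in check mode it
+# only predicts the change without calling the API.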
+
+def has_throughput_changed(table, new_throughput):
+ if not new_throughput:
+ return False
+
+ return new_throughput['read'] != table.throughput['read'] or \
+ new_throughput['write'] != table.throughput['write']
+
+
+def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
+ if range_key_name:
+ schema = [
+ HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])),
+ RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
+ ]
+ else:
+ schema = [
+ HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
+ ]
+ return schema
+
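+# For example, get_schema_param('id', 'STRING', 'create_time', 'NUMBER')
+# (hypothetical key names) returns:
+#   [HashKey('id', STRING), RangeKey('create_time', NUMBER)]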
+
+def get_changed_global_indexes(table, global_indexes):
+ table.describe()
+
+ table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
+ table_index_objects = dict((index.name, index) for index in table.global_indexes)
+ set_index_info = dict((index.name, index.schema()) for index in global_indexes)
+ set_index_objects = dict((index.name, index) for index in global_indexes)
+
+ removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
+ added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
+ # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
+ # for name, index in set_index_objects.items():
+ # if (name not in added_indexes and
+ # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
+ # index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
+ # index_throughput_changes[name] = index.throughput
+ # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
+ index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
+
+ return removed_indexes, added_indexes, index_throughput_changes
+
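+# The three returned dicts are keyed by index name, e.g. (hypothetical names):
+#   removed_indexes          -> {'old-index': <schema dict>}
+#   added_indexes            -> {'new-index': <GlobalAllIndex object>}
+#   index_throughput_changes -> {'kept-index': {'read': 5, 'write': 5}}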
+
+def validate_index(index, module):
+ for key, val in index.items():
+ if key not in INDEX_OPTIONS:
+ module.fail_json(msg='%s is not a valid option for an index' % key)
+ for required_option in INDEX_REQUIRED_OPTIONS:
+ if required_option not in index:
+ module.fail_json(msg='%s is a required option for an index' % required_option)
+ if index['type'] not in INDEX_TYPE_OPTIONS:
+ module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
+
+
+def get_indexes(all_indexes):
+ indexes = []
+ global_indexes = []
+ for index in all_indexes:
+ name = index['name']
+ schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type'))
+ throughput = {
+ 'read': index.get('read_capacity', 1),
+ 'write': index.get('write_capacity', 1)
+ }
+
+ if index['type'] == 'all':
+ indexes.append(AllIndex(name, parts=schema))
+
+ elif index['type'] == 'global_all':
+ global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
+
+ elif index['type'] == 'global_include':
+ global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
+
+ elif index['type'] == 'global_keys_only':
+ global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
+
+ elif index['type'] == 'include':
+ indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
+
+ elif index['type'] == 'keys_only':
+ indexes.append(KeysOnlyIndex(name, parts=schema))
+
+ return indexes, global_indexes
+
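+# Sketch of the mapping (hypothetical index definition): an entry such as
+#   {'name': 'NamedIndex', 'type': 'global_include', 'hash_key_name': 'id',
+#    'includes': ['other_field'], 'read_capacity': 10, 'write_capacity': 10}
+# becomes a GlobalIncludeIndex in global_indexes, while the 'all', 'include'
+# and 'keys_only' types go into the local indexes list.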
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ hash_key_name=dict(type='str'),
+ hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ range_key_name=dict(type='str'),
+ range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ read_capacity=dict(default=1, type='int'),
+ write_capacity=dict(default=1, type='int'),
+ indexes=dict(default=[], type='list', elements='dict'),
+ tags=dict(type='dict'),
+ wait_for_active_timeout=dict(default=60, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ if not HAS_BOTO3 and module.params.get('tags'):
+ module.fail_json(msg='boto3 required when using tags for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg='region must be specified')
+
+ try:
+ connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
+ except (NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ if module.params.get('tags'):
+ try:
+ boto3_dynamodb = module.client('dynamodb')
+ if not hasattr(boto3_dynamodb, 'tag_resource'):
+ module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')
+ boto3_sts = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+ else:
+ boto3_dynamodb = None
+ boto3_sts = None
+
+ state = module.params.get('state')
+ if state == 'present':
+ create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region)
+ elif state == 'absent':
+ delete_dynamo_table(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
new file mode 100644
index 00000000..b23c0ab0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dynamodb_ttl
+version_added: 1.0.0
+short_description: Set TTL for a given DynamoDB table
+description:
+- Uses boto3 to set TTL.
+- Requires botocore version 1.5.24 or higher.
+options:
+ state:
+ description:
+ - State to set DynamoDB table to.
+ choices: ['enable', 'disable']
+ required: false
+ type: str
+ table_name:
+ description:
+ - Name of the DynamoDB table to work on.
+ required: true
+ type: str
+ attribute_name:
+ description:
+ - The name of the Time To Live attribute used to store the expiration time for items in the table.
+ - This appears to be required by the API even when disabling TTL.
+ required: true
+ type: str
+
+author: Ted Timmons (@tedder)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ botocore>=1.5.24, boto3 ]
+'''
+
+EXAMPLES = '''
+- name: Enable TTL on my cowfacts table
+ community.aws.dynamodb_ttl:
+ state: enable
+ table_name: cowfacts
+ attribute_name: cow_deleted_date
+
+- name: Disable TTL on my cowfacts table
+ community.aws.dynamodb_ttl:
+ state: disable
+ table_name: cowfacts
+ attribute_name: cow_deleted_date
+'''
+
+RETURN = '''
+current_status:
+  description: Current or new TTL specification.
+ type: dict
+ returned: always
+ sample:
+ - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
+ - { "AttributeName": "deploy_timestamp", "Enabled": true }
+'''
+
+import distutils.version
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_current_ttl_state(c, table_name):
+ '''Fetch the state dict for a table.'''
+ current_state = c.describe_time_to_live(TableName=table_name)
+ return current_state.get('TimeToLiveDescription')
+
+
+def does_state_need_changing(attribute_name, desired_state, current_spec):
+ '''Run checks to see if the table needs to be modified. Basically a dirty check.'''
+ if not current_spec:
+ # we don't have an entry (or a table?)
+ return True
+
+ if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']:
+ return True
+ if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']:
+ return True
+ if attribute_name != current_spec.get('AttributeName'):
+ return True
+
+ return False
+
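+# For instance, given a current spec of (hypothetical table state):
+#   spec = {'TimeToLiveStatus': 'ENABLED', 'AttributeName': 'expires_at'}
+# does_state_need_changing('expires_at', 'enable', spec)   -> False
+# does_state_need_changing('expires_at', 'disable', spec)  -> True
+# does_state_need_changing('deleted_at', 'enable', spec)   -> True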
+
+def set_ttl_state(c, table_name, state, attribute_name):
+    '''Set our specification. Returns the update_time_to_live specification dict,
+    which differs from the describe_* response.'''
+ is_enabled = False
+ if state.lower() == 'enable':
+ is_enabled = True
+
+ ret = c.update_time_to_live(
+ TableName=table_name,
+ TimeToLiveSpecification={
+ 'Enabled': is_enabled,
+ 'AttributeName': attribute_name
+ }
+ )
+
+ return ret.get('TimeToLiveSpecification')
+
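+# update_time_to_live() echoes back the specification that was set, e.g.:
+#   {'Enabled': True, 'AttributeName': 'expires_at'}
+# whereas describe_time_to_live() reports a TimeToLiveDescription carrying a
+# TimeToLiveStatus such as 'ENABLING' or 'ENABLED' rather than 'Enabled'.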
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['enable', 'disable']),
+ table_name=dict(required=True),
+ attribute_name=dict(required=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'):
+ # TTL was added in this version.
+ module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24'))
+
+ try:
+ dbclient = module.client('dynamodb')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ result = {'changed': False}
+ state = module.params['state']
+
+ # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
+ # methods so it's easier to do here.
+ try:
+ current_state = get_current_ttl_state(dbclient, module.params['table_name'])
+
+ if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state):
+ # changes needed
+ new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name'])
+ result['current_status'] = new_state
+ result['changed'] = True
+ else:
+ # no changes needed
+ result['current_status'] = current_state
+
+ except botocore.exceptions.ClientError as e:
+        module.fail_json_aws(e, msg="Failed to get or update TTL state")
+ except botocore.exceptions.ParamValidationError as e:
+ module.fail_json_aws(e, msg="Failed due to invalid parameters")
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Failed")
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
new file mode 100644
index 00000000..38f11231
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_copy
+version_added: 1.0.0
+short_description: Copies an AMI between AWS regions and returns the new image ID
+description:
+    - Copies an AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
+options:
+ source_region:
+ description:
+ - The source region the AMI should be copied from.
+ required: true
+ type: str
+ source_image_id:
+ description:
+ - The ID of the AMI in source region that should be copied.
+ required: true
+ type: str
+ name:
+ description:
+      - The name for the new AMI. (As of 2.3 the default is C(default); in prior versions it was C(null).)
+ default: "default"
+ type: str
+ description:
+ description:
+ - An optional human-readable string describing the contents and purpose of the new AMI.
+ type: str
+ encrypted:
+ description:
+ - Whether or not the destination snapshots of the copied AMI should be encrypted.
+ type: bool
+ default: false
+ kms_key_id:
+ description:
+ - KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
+ type: str
+ wait:
+ description:
+ - Wait for the copied AMI to be in state C(available) before returning.
+ type: bool
+    default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ - Prior to 2.3 the default was C(1200).
+ - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
+      - This was re-enabled in 2.6 to allow timeouts greater than 10 minutes.
+ default: 600
+ type: int
+ tags:
+ description:
+      - 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key2":"value2"})'
+ type: dict
+ tag_equality:
+ description:
+ - Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
+ in an existing AMI, the AMI will not be copied again.
+ default: false
+ type: bool
+author:
+- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
+- Tim C (@defunctio) <defunct@defunct.io>
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+'''
+
+EXAMPLES = '''
+- name: Basic AMI Copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+
+- name: AMI copy wait until available
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ wait: yes
+ wait_timeout: 1200 # Default timeout is 600
+ register: image_id
+
+- name: Named AMI copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ name: My-Awesome-AMI
+ description: latest patch
+
+- name: Tagged AMI copy (will not copy the same AMI twice)
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ tags:
+ Name: My-Super-AMI
+ Patch: 1.2.3
+ tag_equality: yes
+
+- name: Encrypted AMI copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+
+- name: Encrypted AMI copy with specified key
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+ kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
+'''
+
+RETURN = '''
+image_id:
+ description: AMI ID of the copied AMI
+ returned: always
+ type: str
+ sample: ami-e689729e
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible.module_utils._text import to_native
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def copy_image(module, ec2):
+ """
+ Copies an AMI
+
+ module : AnsibleAWSModule object
+ ec2: ec2 connection object
+ """
+
+ image = None
+ changed = False
+ tags = module.params.get('tags')
+
+ params = {'SourceRegion': module.params.get('source_region'),
+ 'SourceImageId': module.params.get('source_image_id'),
+ 'Name': module.params.get('name'),
+ 'Description': module.params.get('description'),
+ 'Encrypted': module.params.get('encrypted'),
+ }
+ if module.params.get('kms_key_id'):
+ params['KmsKeyId'] = module.params.get('kms_key_id')
+
+ try:
+ if module.params.get('tag_equality'):
+ filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
+ filters.append(dict(Name='state', Values=['available', 'pending']))
+ images = ec2.describe_images(Filters=filters)
+ if len(images['Images']) > 0:
+ image = images['Images'][0]
+ if not image:
+ image = ec2.copy_image(**params)
+ image_id = image['ImageId']
+ if tags:
+ ec2.create_tags(Resources=[image_id],
+ Tags=ansible_dict_to_boto3_tag_list(tags))
+ changed = True
+
+ if module.params.get('wait'):
+ delay = 15
+ max_attempts = module.params.get('wait_timeout') // delay
+ image_id = image.get('ImageId')
+ ec2.get_waiter('image_available').wait(
+ ImageIds=[image_id],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not copy AMI")
+ except Exception as e:
+ module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+
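+# With tag_equality, the describe_images filters built above take the form
+# (hypothetical tags):
+#   [{'Name': 'tag:Name', 'Values': ['My-Super-AMI']},
+#    {'Name': 'tag:Patch', 'Values': ['1.2.3']},
+#    {'Name': 'state', 'Values': ['available', 'pending']}]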
+
+def main():
+ argument_spec = dict(
+ source_region=dict(required=True),
+ source_image_id=dict(required=True),
+ name=dict(default='default'),
+ description=dict(default=''),
+ encrypted=dict(type='bool', default=False, required=False),
+ kms_key_id=dict(type='str', required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ tags=dict(type='dict'),
+ tag_equality=dict(type='bool', default=False))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ # TODO: Check botocore version
+ ec2 = module.client('ec2')
+ copy_image(module, ec2)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg.py
new file mode 100644
index 00000000..568b0fca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg.py
@@ -0,0 +1,1805 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_asg
+version_added: 1.0.0
+short_description: Create or delete AWS AutoScaling Groups (ASGs)
+description:
+ - Can create or delete AWS AutoScaling Groups.
+ - Can be used with the M(community.aws.ec2_lc) module to manage Launch Configurations.
+author: "Gareth Rushgrove (@garethr)"
+requirements: [ "boto3", "botocore" ]
+options:
+ state:
+ description:
+      - Create or delete the Auto Scaling Group.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ name:
+ description:
+ - Unique name for group to be created or deleted.
+ required: true
+ type: str
+ load_balancers:
+ description:
+ - List of ELB names to use for the group. Use for classic load balancers.
+ type: list
+ elements: str
+ target_group_arns:
+ description:
+ - List of target group ARNs to use for the group. Use for application load balancers.
+ type: list
+ elements: str
+ availability_zones:
+ description:
+ - List of availability zone names in which to create the group.
+ - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set.
+ type: list
+ elements: str
+ launch_config_name:
+ description:
+      - Name of the Launch configuration to use for the group. See the M(community.aws.ec2_lc) module for managing these.
+ - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided.
+ type: str
+ launch_template:
+ description:
+      - Dictionary describing the Launch Template to use.
+ suboptions:
+ version:
+ description:
+ - The version number of the launch template to use.
+ - Defaults to latest version if not provided.
+ type: str
+ launch_template_name:
+ description:
+ - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+ type: str
+ launch_template_id:
+ description:
+ - The id of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+ type: str
+ type: dict
+ min_size:
+ description:
+ - Minimum number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ max_size:
+ description:
+ - Maximum number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ max_instance_lifetime:
+ description:
+ - The maximum amount of time, in seconds, that an instance can be in service.
+ - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
+ - Value of 0 removes lifetime restriction.
+ type: int
+ mixed_instances_policy:
+ description:
+ - A mixed instance policy to use for the ASG.
+ - Only used when the ASG is configured to use a Launch Template (I(launch_template)).
+ - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)'
+ required: false
+ suboptions:
+ instance_types:
+ description:
+ - A list of instance_types.
+ type: list
+ elements: str
+ type: dict
+ placement_group:
+ description:
+ - Physical location of your cluster placement group created in Amazon EC2.
+ type: str
+ desired_capacity:
+ description:
+ - Desired number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ replace_all_instances:
+ description:
+ - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration.
+ It increases the ASG size by I(replace_batch_size), waits for the new instances to be up and running.
+ After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced.
+ Once that's done the ASG size is reduced back to the expected size.
+ default: false
+ type: bool
+ replace_batch_size:
+ description:
+ - Number of instances you'd like to replace at a time. Used with I(replace_all_instances).
+ required: false
+ default: 1
+ type: int
+ replace_instances:
+ description:
+ - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances
+ matching the current launch configuration.
+ type: list
+ elements: str
+ lc_check:
+ description:
+ - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config).
+ default: true
+ type: bool
+ lt_check:
+ description:
+ - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
+        I(launch_template) or I(launch_template) I(version).
+ default: true
+ type: bool
+ vpc_zone_identifier:
+ description:
+      - List of VPC subnets to use.
+ type: list
+ elements: str
+ tags:
+ description:
+      - A list of tags to add to the Auto Scaling Group.
+ - Optional key is I(propagate_at_launch), which defaults to true.
+ - When I(propagate_at_launch) is true the tags will be propagated to the Instances created.
+ type: list
+ elements: dict
+ health_check_period:
+ description:
+ - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ required: false
+ default: 300
+ type: int
+ health_check_type:
+ description:
+ - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
+ required: false
+ default: EC2
+ choices: ['EC2', 'ELB']
+ type: str
+ default_cooldown:
+ description:
+ - The number of seconds after a scaling activity completes before another can begin.
+ default: 300
+ type: int
+ wait_timeout:
+ description:
+ - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
+ try increasing this value.
+ default: 300
+ type: int
+ wait_for_instances:
+ description:
+ - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
+ instances have a lifecycle_state of "InService" and a health_status of "Healthy".
+ default: true
+ type: bool
+ termination_policies:
+ description:
+ - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
+ - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained
+ instead of changed to C(Default).
+ - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)'
+ - 'Full documentation of valid values can be found in the AWS documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)'
+ default: Default
+ type: list
+ elements: str
+ notification_topic:
+ description:
+ - A SNS topic ARN to send auto scaling notifications to.
+ type: str
+ notification_types:
+ description:
+ - A list of auto scaling events to trigger notifications on.
+ default:
+ - 'autoscaling:EC2_INSTANCE_LAUNCH'
+ - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
+ required: false
+ type: list
+ elements: str
+ suspend_processes:
+ description:
+ - A list of scaling processes to suspend.
+ - 'Valid values include:'
+ - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer)
+ - 'Full documentation of valid values can be found in the AWS documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)'
+ default: []
+ type: list
+ elements: str
+ metrics_collection:
+ description:
+ - Enable ASG metrics collection.
+ type: bool
+ default: false
+ metrics_granularity:
+ description:
+ - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch.
+ default: "1Minute"
+ type: str
+ metrics_list:
+ description:
+ - List of autoscaling metrics to collect when I(metrics_collection=true).
+ default:
+ - 'GroupMinSize'
+ - 'GroupMaxSize'
+ - 'GroupDesiredCapacity'
+ - 'GroupInServiceInstances'
+ - 'GroupPendingInstances'
+ - 'GroupStandbyInstances'
+ - 'GroupTerminatingInstances'
+ - 'GroupTotalInstances'
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Basic configuration with Launch Configuration
+
+- community.aws.ec2_asg:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_config_name: 'lc-1'
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: no
+
+# Rolling ASG Updates
+
+# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
+#
+# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
+# a rolling fashion with instances using the current launch configuration, "my_new_lc".
+#
+# This could also be considered a rolling deploy of a pre-baked AMI.
+#
+# If this is a newly created group, the instances will not be replaced since all instances
+# will have the current launch configuration.
+
+- name: create launch config
+ community.aws.ec2_lc:
+ name: my_new_lc
+ image_id: ami-lkajsf
+ key_name: mykey
+ region: us-east-1
+ security_groups: sg-23423
+ instance_type: m1.small
+ assign_public_ip: yes
+
+- community.aws.ec2_asg:
+ name: myasg
+ launch_config_name: my_new_lc
+ health_check_period: 60
+ health_check_type: ELB
+ replace_all_instances: yes
+ min_size: 5
+ max_size: 5
+ desired_capacity: 5
+ region: us-east-1
+
+# To only replace a couple of instances instead of all of them, supply a list
+# to "replace_instances":
+
+- community.aws.ec2_asg:
+ name: myasg
+ launch_config_name: my_new_lc
+ health_check_period: 60
+ health_check_type: ELB
+ replace_instances:
+ - i-b345231
+ - i-24c2931
+ min_size: 5
+ max_size: 5
+ desired_capacity: 5
+ region: us-east-1
+
+# Basic Configuration with Launch Template
+
+- community.aws.ec2_asg:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_template:
+ version: '1'
+ launch_template_name: 'lt-example'
+ launch_template_id: 'lt-123456'
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: no
+
+# Basic Configuration with Launch Template using mixed instance policy
+
+- community.aws.ec2_asg:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_template:
+ version: '1'
+ launch_template_name: 'lt-example'
+ launch_template_id: 'lt-123456'
+ mixed_instances_policy:
+ instance_types:
+ - t3a.large
+ - t3.large
+ - t2.large
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: no
+'''
+
+RETURN = r'''
+---
+auto_scaling_group_name:
+ description: The unique name of the auto scaling group
+ returned: success
+ type: str
+ sample: "myasg"
+auto_scaling_group_arn:
+ description: The unique ARN of the autoscaling group
+ returned: success
+ type: str
+ sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg"
+availability_zones:
+ description: The availability zones for the auto scaling group
+ returned: success
+ type: list
+ sample: [
+ "us-east-1d"
+ ]
+created_time:
+ description: Timestamp of create time of the auto scaling group
+ returned: success
+ type: str
+ sample: "2017-11-08T14:41:48.272000+00:00"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+healthcheck_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+healthcheck_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+healthy_instances:
+ description: Number of instances in a healthy state
+ returned: success
+ type: int
+ sample: 5
+in_service_instances:
+ description: Number of instances in service
+ returned: success
+ type: int
+ sample: 3
+instance_facts:
+ description: Dictionary of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: dict
+ sample: {
+ "i-0123456789012": {
+ "health_status": "Healthy",
+ "launch_config_name": "public-webapp-production-1",
+ "lifecycle_state": "InService"
+ }
+ }
+instances:
+ description: list of instance IDs in the ASG
+ returned: success
+ type: list
+ sample: [
+ "i-0123456789012"
+ ]
+launch_config_name:
+ description: >
+ Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+ provided for compatibility with ec2_asg module.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancers:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_instance_lifetime:
+ description: The maximum amount of time, in seconds, that an instance can be in service.
+ returned: success
+ type: int
+ sample: 604800
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+mixed_instance_policy:
+ description: Returns the list of instance types if a mixed instance policy is set.
+ returned: success
+ type: list
+ sample: ["t3.micro", "t3a.micro"]
+pending_instances:
+ description: Number of instances in pending state
+ returned: success
+ type: int
+ sample: 1
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+target_group_arns:
+ description: List of ARNs of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+ ]
+target_group_names:
+ description: List of names of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "target-group-host-hello",
+ "target-group-path-world"
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+ type: list
+ sample: ["Default"]
+unhealthy_instances:
+ description: Number of instances in an unhealthy state
+ returned: success
+ type: int
+ sample: 0
+viable_instances:
+ description: Number of instances in a viable state
+ returned: success
+ type: int
+ sample: 1
+vpc_zone_identifier:
+ description: VPC zone ID / subnet id for the auto scaling group
+ returned: success
+ type: str
+ sample: "subnet-a31ef45f"
+metrics_collection:
+  description: List of enabled AutoScalingGroup metrics
+ returned: success
+ type: list
+ sample: [
+ {
+ "Granularity": "1Minute",
+ "Metric": "GroupInServiceInstances"
+ }
+ ]
+'''
+
+import time
+import traceback
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
+ 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
+ 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
+ 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
+ 'VPCZoneIdentifier')
+
+INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
+
+backoff_params = dict(tries=10, delay=3, backoff=1.5)
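+# With these values AWSRetry makes up to 10 attempts, sleeping roughly
+# 3s, 4.5s, 6.75s, ... between them (delay multiplied by backoff each retry).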
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_autoscaling_groups(connection, group_name):
+ pg = connection.get_paginator('describe_auto_scaling_groups')
+ return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
+
+
+@AWSRetry.backoff(**backoff_params)
+def deregister_lb_instances(connection, lb_name, instance_id):
+ connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_instance_health(connection, lb_name, instances):
+ params = dict(LoadBalancerName=lb_name)
+ if instances:
+ params.update(Instances=instances)
+ return connection.describe_instance_health(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_target_health(connection, target_group_arn, instances):
+ return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
+
+
+@AWSRetry.backoff(**backoff_params)
+def suspend_asg_processes(connection, asg_name, processes):
+ connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.backoff(**backoff_params)
+def resume_asg_processes(connection, asg_name, processes):
+ connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_launch_configurations(connection, launch_config_name):
+ pg = connection.get_paginator('describe_launch_configurations')
+ return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_launch_templates(connection, launch_template):
+ if launch_template['launch_template_id'] is not None:
+ try:
+ lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']])
+ return lt
+        except botocore.exceptions.ClientError:
+ module.fail_json(msg="No launch template found matching: %s" % launch_template)
+ else:
+ try:
+ lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']])
+ return lt
+        except botocore.exceptions.ClientError:
+ module.fail_json(msg="No launch template found matching: %s" % launch_template)
+
+
+@AWSRetry.backoff(**backoff_params)
+def create_asg(connection, **params):
+ connection.create_auto_scaling_group(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def put_notification_config(connection, asg_name, topic_arn, notification_types):
+ connection.put_notification_configuration(
+ AutoScalingGroupName=asg_name,
+ TopicARN=topic_arn,
+ NotificationTypes=notification_types
+ )
+
+
+@AWSRetry.backoff(**backoff_params)
+def del_notification_config(connection, asg_name, topic_arn):
+ connection.delete_notification_configuration(
+ AutoScalingGroupName=asg_name,
+ TopicARN=topic_arn
+ )
+
+
+@AWSRetry.backoff(**backoff_params)
+def attach_load_balancers(connection, asg_name, load_balancers):
+ connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
+
+
+@AWSRetry.backoff(**backoff_params)
+def detach_load_balancers(connection, asg_name, load_balancers):
+ connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
+
+
+@AWSRetry.backoff(**backoff_params)
+def attach_lb_target_groups(connection, asg_name, target_group_arns):
+ connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
+
+
+@AWSRetry.backoff(**backoff_params)
+def detach_lb_target_groups(connection, asg_name, target_group_arns):
+ connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
+
+
+@AWSRetry.backoff(**backoff_params)
+def update_asg(connection, **params):
+ connection.update_auto_scaling_group(**params)
+
+
+@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params)
+def delete_asg(connection, asg_name, force_delete):
+ connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)
+
+
+@AWSRetry.backoff(**backoff_params)
+def terminate_asg_instance(connection, instance_id, decrement_capacity):
+ connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
+ ShouldDecrementDesiredCapacity=decrement_capacity)
+
+
+def enforce_required_arguments_for_create():
+    ''' Many arguments are not required for autoscaling group deletion, so they
+    cannot be mandatory module arguments; enforce them here for create instead. '''
+ missing_args = []
+ if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None:
+ module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create")
+ for arg in ('min_size', 'max_size'):
+ if module.params[arg] is None:
+ missing_args.append(arg)
+ if missing_args:
+ module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args))
+
+
+def get_properties(autoscaling_group):
+ properties = dict(
+ healthy_instances=0,
+ in_service_instances=0,
+ unhealthy_instances=0,
+ pending_instances=0,
+ viable_instances=0,
+ terminating_instances=0
+ )
+ instance_facts = dict()
+ autoscaling_group_instances = autoscaling_group.get('Instances')
+
+ if autoscaling_group_instances:
+ properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
+ for i in autoscaling_group_instances:
+ instance_facts[i['InstanceId']] = {
+ 'health_status': i['HealthStatus'],
+ 'lifecycle_state': i['LifecycleState']
+ }
+ if 'LaunchConfigurationName' in i:
+ instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
+ elif 'LaunchTemplate' in i:
+ instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
+
+ if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
+ properties['viable_instances'] += 1
+
+ if i['HealthStatus'] == 'Healthy':
+ properties['healthy_instances'] += 1
+ else:
+ properties['unhealthy_instances'] += 1
+
+ if i['LifecycleState'] == 'InService':
+ properties['in_service_instances'] += 1
+ if i['LifecycleState'] == 'Terminating':
+ properties['terminating_instances'] += 1
+ if i['LifecycleState'] == 'Pending':
+ properties['pending_instances'] += 1
+ else:
+ properties['instances'] = []
+
+ properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
+ properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
+ properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
+ properties['created_time'] = autoscaling_group.get('CreatedTime')
+ properties['instance_facts'] = instance_facts
+ properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
+ if 'LaunchConfigurationName' in autoscaling_group:
+ properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
+ else:
+ properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
+ properties['tags'] = autoscaling_group.get('Tags')
+ properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
+ properties['min_size'] = autoscaling_group.get('MinSize')
+ properties['max_size'] = autoscaling_group.get('MaxSize')
+ properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
+ properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
+ properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
+ properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
+ properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
+ properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
+ properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
+ raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy')
+ if raw_mixed_instance_object:
+ properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')]
+
+ metrics = autoscaling_group.get('EnabledMetrics')
+ if metrics:
+ metrics.sort(key=lambda x: x["Metric"])
+ properties['metrics_collection'] = metrics
+
+ if properties['target_group_arns']:
+ elbv2_connection = module.client('elbv2')
+ tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
+ tg_result = tg_paginator.paginate(
+ TargetGroupArns=properties['target_group_arns']
+ ).build_full_result()
+ target_groups = tg_result['TargetGroups']
+ else:
+ target_groups = []
+
+ properties['target_group_names'] = [
+ tg['TargetGroupName']
+ for tg in target_groups
+ ]
+
+ return properties
+
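+# The returned dict mirrors the RETURN documentation above; a one-instance
+# group might yield (abridged, hypothetical values):
+#   {'auto_scaling_group_name': 'myasg', 'min_size': 1, 'max_size': 1,
+#    'viable_instances': 1, 'instance_facts': {'i-0123456789012': {...}}}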
+
+def get_launch_object(connection, ec2_connection):
+ launch_object = dict()
+ launch_config_name = module.params.get('launch_config_name')
+ launch_template = module.params.get('launch_template')
+ mixed_instances_policy = module.params.get('mixed_instances_policy')
+ if launch_config_name is None and launch_template is None:
+ return launch_object
+ elif launch_config_name:
+ try:
+ launch_configs = describe_launch_configurations(connection, launch_config_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to describe launch configurations",
+ exception=traceback.format_exc())
+ if len(launch_configs['LaunchConfigurations']) == 0:
+ module.fail_json(msg="No launch config found with name %s" % launch_config_name)
+ launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']}
+ return launch_object
+ elif launch_template:
+ lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0]
+ if launch_template['version'] is not None:
+ launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}}
+ else:
+ launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}}
+
+ if mixed_instances_policy:
+ instance_types = mixed_instances_policy.get('instance_types', [])
+ policy = {
+ 'LaunchTemplate': {
+ 'LaunchTemplateSpecification': launch_object['LaunchTemplate']
+ }
+ }
+ if instance_types:
+ policy['LaunchTemplate']['Overrides'] = []
+ for instance_type in instance_types:
+ instance_type_dict = {'InstanceType': instance_type}
+ policy['LaunchTemplate']['Overrides'].append(instance_type_dict)
+ launch_object['MixedInstancesPolicy'] = policy
+ return launch_object
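+
+# Illustrative example (hypothetical values, not executed): with launch_template
+# {'launch_template_name': 'web-lt', 'version': None} and mixed_instances_policy
+# {'instance_types': ['t3.large', 't3a.large']}, get_launch_object() returns roughly:
+#   {'LaunchTemplate': {'LaunchTemplateId': 'lt-0abc1234', 'Version': '3'},
+#    'MixedInstancesPolicy': {'LaunchTemplate': {
+#        'LaunchTemplateSpecification': {'LaunchTemplateId': 'lt-0abc1234', 'Version': '3'},
+#        'Overrides': [{'InstanceType': 't3.large'}, {'InstanceType': 't3a.large'}]}}}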
+
+
+def elb_dreg(asg_connection, group_name, instance_id):
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ wait_timeout = module.params.get('wait_timeout')
+ count = 1
+ if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
+ elb_connection = module.client('elb')
+ else:
+ return
+
+ for lb in as_group['LoadBalancerNames']:
+ deregister_lb_instances(elb_connection, lb, instance_id)
+ module.debug("De-registering %s from ELB %s" % (instance_id, lb))
+
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and count > 0:
+ count = 0
+ for lb in as_group['LoadBalancerNames']:
+ lb_instances = describe_instance_health(elb_connection, lb, [])
+ for i in lb_instances['InstanceStates']:
+ if i['InstanceId'] == instance_id and i['State'] == "InService":
+ count += 1
+ module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
+
+
+def elb_healthy(asg_connection, elb_connection, group_name):
+ healthy_instances = set()
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ props = get_properties(as_group)
+ # get healthy, inservice instances from ASG
+ instances = []
+ for instance, settings in props['instance_facts'].items():
+ if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+ instances.append(dict(InstanceId=instance))
+ module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+ module.debug("ELB instance status:")
+ lb_instances = list()
+ for lb in as_group.get('LoadBalancerNames'):
+ # we catch a race condition that sometimes happens if the instance exists in the ASG
+        # but has not yet shown up in the ELB
+ try:
+ lb_instances = describe_instance_health(elb_connection, lb, instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidInstance':
+ return None
+
+ module.fail_json(msg="Failed to get load balancer.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to get load balancer.",
+ exception=traceback.format_exc())
+
+ for i in lb_instances.get('InstanceStates'):
+ if i['State'] == "InService":
+ healthy_instances.add(i['InstanceId'])
+ module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
+ return len(healthy_instances)
+
+
+def tg_healthy(asg_connection, elbv2_connection, group_name):
+ healthy_instances = set()
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ props = get_properties(as_group)
+ # get healthy, inservice instances from ASG
+ instances = []
+ for instance, settings in props['instance_facts'].items():
+ if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+ instances.append(dict(Id=instance))
+ module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+ module.debug("Target Group instance status:")
+ tg_instances = list()
+ for tg in as_group.get('TargetGroupARNs'):
+ # we catch a race condition that sometimes happens if the instance exists in the ASG
+        # but has not yet shown up in the ELB
+ try:
+ tg_instances = describe_target_health(elbv2_connection, tg, instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidInstance':
+ return None
+
+ module.fail_json(msg="Failed to get target group.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to get target group.",
+ exception=traceback.format_exc())
+
+ for i in tg_instances.get('TargetHealthDescriptions'):
+ if i['TargetHealth']['State'] == "healthy":
+ healthy_instances.add(i['Target']['Id'])
+ module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
+ return len(healthy_instances)
+
+
+def wait_for_elb(asg_connection, group_name):
+ wait_timeout = module.params.get('wait_timeout')
+
+ # if the health_check_type is ELB, we want to query the ELBs directly for instance
+    # status, so as to avoid the health_check_grace_period granted to ASG instances
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+ if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
+ module.debug("Waiting for ELB to consider instances healthy.")
+ elb_connection = module.client('elb')
+
+ wait_timeout = time.time() + wait_timeout
+ healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+
+ while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
+ healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+ module.debug("ELB thinks %s instances are healthy." % healthy_instances)
+ time.sleep(10)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+ module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
+
+
+def wait_for_target_group(asg_connection, group_name):
+ wait_timeout = module.params.get('wait_timeout')
+
+    # if the health_check_type is ELB, we want to query the target groups directly for
+    # instance status, so as to avoid the health_check_grace_period granted to ASG instances
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+ if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
+ module.debug("Waiting for Target Group to consider instances healthy.")
+ elbv2_connection = module.client('elbv2')
+
+ wait_timeout = time.time() + wait_timeout
+ healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+
+ while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
+ healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+ module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
+ time.sleep(10)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+ module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
+
+
+def suspend_processes(ec2_connection, as_group):
+ suspend_processes = set(module.params.get('suspend_processes'))
+
+ try:
+ suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
+ except AttributeError:
+ # New ASG being created, no suspended_processes defined yet
+ suspended_processes = set()
+
+ if suspend_processes == suspended_processes:
+ return False
+
+ resume_processes = list(suspended_processes - suspend_processes)
+ if resume_processes:
+ resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
+
+ if suspend_processes:
+ suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))
+
+ return True
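+
+# Illustrative example (hypothetical process names): if the group currently suspends {'Launch'}
+# and the task specifies suspend_processes: ['Terminate'], suspend_processes() resumes 'Launch',
+# suspends 'Terminate', and returns True so the caller can report a change.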
+
+
+def create_autoscaling_group(connection):
+ group_name = module.params.get('name')
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ availability_zones = module.params['availability_zones']
+ launch_config_name = module.params.get('launch_config_name')
+ launch_template = module.params.get('launch_template')
+ mixed_instances_policy = module.params.get('mixed_instances_policy')
+ min_size = module.params['min_size']
+ max_size = module.params['max_size']
+ max_instance_lifetime = module.params.get('max_instance_lifetime')
+ placement_group = module.params.get('placement_group')
+ desired_capacity = module.params.get('desired_capacity')
+ vpc_zone_identifier = module.params.get('vpc_zone_identifier')
+ set_tags = module.params.get('tags')
+ health_check_period = module.params.get('health_check_period')
+ health_check_type = module.params.get('health_check_type')
+ default_cooldown = module.params.get('default_cooldown')
+ wait_for_instances = module.params.get('wait_for_instances')
+ wait_timeout = module.params.get('wait_timeout')
+ termination_policies = module.params.get('termination_policies')
+ notification_topic = module.params.get('notification_topic')
+ notification_types = module.params.get('notification_types')
+ metrics_collection = module.params.get('metrics_collection')
+ metrics_granularity = module.params.get('metrics_granularity')
+ metrics_list = module.params.get('metrics_list')
+
+ try:
+ as_groups = describe_autoscaling_groups(connection, group_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to describe auto scaling groups.",
+ exception=traceback.format_exc())
+
+ ec2_connection = module.client('ec2')
+
+ if vpc_zone_identifier:
+ vpc_zone_identifier = ','.join(vpc_zone_identifier)
+
+ asg_tags = []
+ for tag in set_tags:
+ for k, v in tag.items():
+ if k != 'propagate_at_launch':
+ asg_tags.append(dict(Key=k,
+ Value=to_native(v),
+ PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
+ ResourceType='auto-scaling-group',
+ ResourceId=group_name))
+ if not as_groups:
+ if not vpc_zone_identifier and not availability_zones:
+            availability_zones = module.params['availability_zones'] = [
+                zone['ZoneName'] for zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
+
+ enforce_required_arguments_for_create()
+
+ if desired_capacity is None:
+ desired_capacity = min_size
+ ag = dict(
+ AutoScalingGroupName=group_name,
+ MinSize=min_size,
+ MaxSize=max_size,
+ DesiredCapacity=desired_capacity,
+ Tags=asg_tags,
+ HealthCheckGracePeriod=health_check_period,
+ HealthCheckType=health_check_type,
+ DefaultCooldown=default_cooldown,
+ TerminationPolicies=termination_policies)
+ if vpc_zone_identifier:
+ ag['VPCZoneIdentifier'] = vpc_zone_identifier
+ if availability_zones:
+ ag['AvailabilityZones'] = availability_zones
+ if placement_group:
+ ag['PlacementGroup'] = placement_group
+ if load_balancers:
+ ag['LoadBalancerNames'] = load_balancers
+ if target_group_arns:
+ ag['TargetGroupARNs'] = target_group_arns
+ if max_instance_lifetime:
+ ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+ launch_object = get_launch_object(connection, ec2_connection)
+ if 'LaunchConfigurationName' in launch_object:
+ ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+ elif 'LaunchTemplate' in launch_object:
+ if 'MixedInstancesPolicy' in launch_object:
+ ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+ else:
+ ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+ else:
+ module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate",
+ exception=traceback.format_exc())
+
+ try:
+ create_asg(connection, **ag)
+ if metrics_collection:
+ connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+
+ all_ag = describe_autoscaling_groups(connection, group_name)
+ if len(all_ag) == 0:
+ module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
+ as_group = all_ag[0]
+ suspend_processes(connection, as_group)
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+ if load_balancers:
+ wait_for_elb(connection, group_name)
+            # Wait for target group health if target group(s) defined
+ if target_group_arns:
+ wait_for_target_group(connection, group_name)
+ if notification_topic:
+ put_notification_config(connection, group_name, notification_topic, notification_types)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ changed = True
+ return changed, asg_properties
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to create Autoscaling Group.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to create Autoscaling Group.",
+ exception=traceback.format_exc())
+ else:
+ as_group = as_groups[0]
+ initial_asg_properties = get_properties(as_group)
+ changed = False
+
+ if suspend_processes(connection, as_group):
+ changed = True
+
+ # process tag changes
+ if len(set_tags) > 0:
+ have_tags = as_group.get('Tags')
+ want_tags = asg_tags
+ if have_tags:
+ have_tags.sort(key=lambda x: x["Key"])
+ if want_tags:
+ want_tags.sort(key=lambda x: x["Key"])
+ dead_tags = []
+ have_tag_keyvals = [x['Key'] for x in have_tags]
+ want_tag_keyvals = [x['Key'] for x in want_tags]
+
+ for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
+ changed = True
+ dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
+ ResourceType='auto-scaling-group', Key=dead_tag))
+ have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
+ if dead_tags:
+ connection.delete_tags(Tags=dead_tags)
+
+ zipped = zip(have_tags, want_tags)
+ if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
+ changed = True
+ connection.create_or_update_tags(Tags=asg_tags)
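+            # Illustrative example (hypothetical tags): if the group carries env=prod and team=web
+            # but the task only specifies env=prod, 'team' is collected into dead_tags and deleted
+            # above, then the remaining tags are compared and (re)created via create_or_update_tags.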
+
+ # Handle load balancer attachments/detachments
+ # Attach load balancers if they are specified but none currently exist
+ if load_balancers and not as_group['LoadBalancerNames']:
+ changed = True
+ try:
+ attach_load_balancers(connection, group_name, load_balancers)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group.",
+ exception=traceback.format_exc())
+
+ # Update load balancers if they are specified and one or more already exists
+ elif as_group['LoadBalancerNames']:
+ change_load_balancers = load_balancers is not None
+ # Get differences
+ if not load_balancers:
+ load_balancers = list()
+ wanted_elbs = set(load_balancers)
+
+ has_elbs = set(as_group['LoadBalancerNames'])
+            # check whether any currently attached load balancers are no longer wanted
+ if has_elbs - wanted_elbs and change_load_balancers:
+                # if wanted contains fewer than existing, then we need to detach some
+ elbs_to_detach = has_elbs.difference(wanted_elbs)
+ if elbs_to_detach:
+ changed = True
+ try:
+ detach_load_balancers(connection, group_name, list(elbs_to_detach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)),
+ exception=traceback.format_exc())
+ if wanted_elbs - has_elbs:
+                # if existing contains fewer than wanted, then we need to attach some
+ elbs_to_attach = wanted_elbs.difference(has_elbs)
+ if elbs_to_attach:
+ changed = True
+ try:
+ attach_load_balancers(connection, group_name, list(elbs_to_attach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)),
+ exception=traceback.format_exc())
+
+ # Handle target group attachments/detachments
+ # Attach target groups if they are specified but none currently exist
+ if target_group_arns and not as_group['TargetGroupARNs']:
+ changed = True
+ try:
+ attach_lb_target_groups(connection, group_name, target_group_arns)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group.",
+ exception=traceback.format_exc())
+ # Update target groups if they are specified and one or more already exists
+ elif target_group_arns is not None and as_group['TargetGroupARNs']:
+ # Get differences
+ wanted_tgs = set(target_group_arns)
+ has_tgs = set(as_group['TargetGroupARNs'])
+            # check whether any currently attached target groups are no longer wanted
+ if has_tgs.issuperset(wanted_tgs):
+                # if wanted contains fewer than existing, then we need to detach some
+ tgs_to_detach = has_tgs.difference(wanted_tgs)
+ if tgs_to_detach:
+ changed = True
+ try:
+ detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)),
+ exception=traceback.format_exc())
+ if wanted_tgs.issuperset(has_tgs):
+                # if existing contains fewer than wanted, then we need to attach some
+ tgs_to_attach = wanted_tgs.difference(has_tgs)
+ if tgs_to_attach:
+ changed = True
+ try:
+ attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
+ exception=traceback.format_exc())
+
+ # check for attributes that aren't required for updating an existing ASG
+        # check if min_size/max_size/desired_capacity have been specified and, if not, use the ASG's current values
+ if min_size is None:
+ min_size = as_group['MinSize']
+ if max_size is None:
+ max_size = as_group['MaxSize']
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+ ag = dict(
+ AutoScalingGroupName=group_name,
+ MinSize=min_size,
+ MaxSize=max_size,
+ DesiredCapacity=desired_capacity,
+ HealthCheckGracePeriod=health_check_period,
+ HealthCheckType=health_check_type,
+ DefaultCooldown=default_cooldown,
+ TerminationPolicies=termination_policies)
+
+ # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
+ launch_object = get_launch_object(connection, ec2_connection)
+ if 'LaunchConfigurationName' in launch_object:
+ ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+ elif 'LaunchTemplate' in launch_object:
+ if 'MixedInstancesPolicy' in launch_object:
+ ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+ else:
+ ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+ else:
+ try:
+ ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
+ except Exception:
+ launch_template = as_group['LaunchTemplate']
+ # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
+ ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
+
+ if availability_zones:
+ ag['AvailabilityZones'] = availability_zones
+ if vpc_zone_identifier:
+ ag['VPCZoneIdentifier'] = vpc_zone_identifier
+ if max_instance_lifetime is not None:
+ ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+ try:
+ update_asg(connection, **ag)
+
+ if metrics_collection:
+ connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+ else:
+ connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
+ exception=traceback.format_exc())
+ if notification_topic:
+ try:
+ put_notification_config(connection, group_name, notification_topic, notification_types)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group notifications.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group notifications.",
+ exception=traceback.format_exc())
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+        # Wait for ELB health if ELB(s) defined
+ if load_balancers:
+ module.debug('\tWAITING FOR ELB HEALTH')
+ wait_for_elb(connection, group_name)
+        # Wait for target group health if target group(s) defined
+
+ if target_group_arns:
+ module.debug('\tWAITING FOR TG HEALTH')
+ wait_for_target_group(connection, group_name)
+
+ try:
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ if asg_properties != initial_asg_properties:
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to read existing Autoscaling Groups.",
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json(msg="Failed to read existing Autoscaling Groups.",
+ exception=traceback.format_exc())
+ return changed, asg_properties
+
+
+def delete_autoscaling_group(connection):
+ group_name = module.params.get('name')
+ notification_topic = module.params.get('notification_topic')
+ wait_for_instances = module.params.get('wait_for_instances')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if notification_topic:
+ del_notification_config(connection, group_name, notification_topic)
+ groups = describe_autoscaling_groups(connection, group_name)
+ if groups:
+ wait_timeout = time.time() + wait_timeout
+ if not wait_for_instances:
+ delete_asg(connection, group_name, force_delete=True)
+ else:
+ updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
+ update_asg(connection, **updated_params)
+ instances = True
+ while instances and wait_for_instances and wait_timeout >= time.time():
+ tmp_groups = describe_autoscaling_groups(connection, group_name)
+ if tmp_groups:
+ tmp_group = tmp_groups[0]
+ if not tmp_group.get('Instances'):
+ instances = False
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
+
+ delete_asg(connection, group_name, force_delete=False)
+ while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
+ time.sleep(5)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
+ return True
+
+ return False
+
+
+def get_chunks(items, n):
+    for i in range(0, len(items), n):
+        yield items[i:i + n]
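+
+# Illustrative example (hypothetical IDs): list(get_chunks(['i-1', 'i-2', 'i-3'], 2)) yields
+# [['i-1', 'i-2'], ['i-3']]; replace() uses this to terminate old instances in batches of
+# replace_batch_size.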
+
+
+def update_size(connection, group, max_size, min_size, dc):
+ module.debug("setting ASG sizes")
+ module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
+ updated_group = dict()
+ updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
+ updated_group['MinSize'] = min_size
+ updated_group['MaxSize'] = max_size
+ updated_group['DesiredCapacity'] = dc
+ update_asg(connection, **updated_group)
+
+
+def replace(connection):
+ batch_size = module.params.get('replace_batch_size')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+ group_name = module.params.get('name')
+ max_size = module.params.get('max_size')
+ min_size = module.params.get('min_size')
+ desired_capacity = module.params.get('desired_capacity')
+ launch_config_name = module.params.get('launch_config_name')
+    # lc_check defaults to 'true', but it is only meaningful when a launch config is in use
+ if launch_config_name:
+ lc_check = module.params.get('lc_check')
+ else:
+ lc_check = False
+ # Mirror above behavior for Launch Templates
+ launch_template = module.params.get('launch_template')
+ if launch_template:
+ lt_check = module.params.get('lt_check')
+ else:
+ lt_check = False
+ replace_instances = module.params.get('replace_instances')
+ replace_all_instances = module.params.get('replace_all_instances')
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+
+ props = get_properties(as_group)
+ instances = props['instances']
+ if replace_all_instances:
+ # If replacing all instances, then set replace_instances to current set
+ # This allows replace_instances and replace_all_instances to behave same
+ replace_instances = instances
+ if replace_instances:
+ instances = replace_instances
+
+ # check to see if instances are replaceable if checking launch configs
+ if launch_config_name:
+ new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
+ elif launch_template:
+ new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)
+
+ num_new_inst_needed = desired_capacity - len(new_instances)
+
+ if lc_check or lt_check:
+ if num_new_inst_needed == 0 and old_instances:
+ module.debug("No new instances needed, but old instances are present. Removing old instances")
+ terminate_batch(connection, old_instances, instances, True)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ changed = True
+ return changed, props
+
+ # we don't want to spin up extra instances if not necessary
+ if num_new_inst_needed < batch_size:
+ module.debug("Overriding batch size to %s" % num_new_inst_needed)
+ batch_size = num_new_inst_needed
+
+ if not old_instances:
+ changed = False
+ return changed, props
+
+    # check if min_size/max_size/desired_capacity have been specified and, if not, use the ASG's current values
+ if min_size is None:
+ min_size = as_group['MinSize']
+ if max_size is None:
+ max_size = as_group['MaxSize']
+
+ # set temporary settings and wait for them to be reached
+ # This should get overwritten if the number of instances left is less than the batch size.
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
+
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
+ wait_for_elb(connection, group_name)
+ wait_for_target_group(connection, group_name)
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ instances = props['instances']
+ if replace_instances:
+ instances = replace_instances
+
+ module.debug("beginning main loop")
+ for i in get_chunks(instances, batch_size):
+ # break out of this loop if we have enough new instances
+ break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
+
+ if wait_for_instances:
+ wait_for_term_inst(connection, term_instances)
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
+ wait_for_elb(connection, group_name)
+ wait_for_target_group(connection, group_name)
+
+ if break_early:
+ module.debug("breaking loop")
+ break
+
+ update_size(connection, as_group, max_size, min_size, desired_capacity)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ module.debug("Rolling update complete.")
+ changed = True
+ return changed, asg_properties
+
+
+def get_instances_by_launch_config(props, lc_check, initial_instances):
+ new_instances = []
+ old_instances = []
+ # old instances are those that have the old launch config
+ if lc_check:
+ for i in props['instances']:
+ # Check if migrating from launch_template to launch_config first
+ if 'launch_template' in props['instance_facts'][i]:
+ old_instances.append(i)
+ elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ else:
+ module.debug("Comparing initial instances with current: %s" % initial_instances)
+ for i in props['instances']:
+ if i not in initial_instances:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+ module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+ return new_instances, old_instances
+
+
+def get_instances_by_launch_template(props, lt_check, initial_instances):
+ new_instances = []
+ old_instances = []
+ # old instances are those that have the old launch template or version of the same launch template
+ if lt_check:
+ for i in props['instances']:
+ # Check if migrating from launch_config_name to launch_template_name first
+ if 'launch_config_name' in props['instance_facts'][i]:
+ old_instances.append(i)
+ elif props['instance_facts'][i].get('launch_template') == props['launch_template']:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+ else:
+ module.debug("Comparing initial instances with current: %s" % initial_instances)
+ for i in props['instances']:
+ if i not in initial_instances:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+ module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+ return new_instances, old_instances
+
+
+def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
+ instances_to_terminate = []
+ instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
+ # check to make sure instances given are actually in the given ASG
+ # and they have a non-current launch config
+    if module.params.get('launch_config_name'):
+ if lc_check:
+ for i in instances:
+ if (
+ 'launch_template' in props['instance_facts'][i]
+ or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
+ ):
+ instances_to_terminate.append(i)
+ else:
+ for i in instances:
+ if i in initial_instances:
+ instances_to_terminate.append(i)
+    elif module.params.get('launch_template'):
+ if lt_check:
+ for i in instances:
+ if (
+ 'launch_config_name' in props['instance_facts'][i]
+ or props['instance_facts'][i]['launch_template'] != props['launch_template']
+ ):
+ instances_to_terminate.append(i)
+ else:
+ for i in instances:
+ if i in initial_instances:
+ instances_to_terminate.append(i)
+
+ return instances_to_terminate
+
+
+def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
+ batch_size = module.params.get('replace_batch_size')
+ min_size = module.params.get('min_size')
+ desired_capacity = module.params.get('desired_capacity')
+ group_name = module.params.get('name')
+ lc_check = module.params.get('lc_check')
+ lt_check = module.params.get('lt_check')
+ decrement_capacity = False
+ break_loop = False
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+
+ props = get_properties(as_group)
+ desired_size = as_group['MinSize']
+ if module.params.get('launch_config_name'):
+ new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
+ else:
+ new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
+ num_new_inst_needed = desired_capacity - len(new_instances)
+
+ # check to make sure instances given are actually in the given ASG
+ # and they have a non-current launch config
+ instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)
+
+ module.debug("new instances needed: %s" % num_new_inst_needed)
+ module.debug("new instances: %s" % new_instances)
+ module.debug("old instances: %s" % old_instances)
+ module.debug("batch instances: %s" % ",".join(instances_to_terminate))
+
+ if num_new_inst_needed == 0:
+ decrement_capacity = True
+ if as_group['MinSize'] != min_size:
+ if min_size is None:
+ min_size = as_group['MinSize']
+ updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
+ update_asg(connection, **updated_params)
+ module.debug("Updating minimum size back to original of %s" % min_size)
+        # if there are some leftover old instances, but we are already at capacity with new ones
+ # we don't want to decrement capacity
+ if leftovers:
+ decrement_capacity = False
+ break_loop = True
+ instances_to_terminate = old_instances
+ desired_size = min_size
+ module.debug("No new instances needed")
+
+ if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
+ instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
+ decrement_capacity = False
+ break_loop = False
+ module.debug("%s new instances needed" % num_new_inst_needed)
+
+ module.debug("decrementing capacity: %s" % decrement_capacity)
+
+ for instance_id in instances_to_terminate:
+ elb_dreg(connection, group_name, instance_id)
+ module.debug("terminating instance: %s" % instance_id)
+ terminate_asg_instance(connection, instance_id, decrement_capacity)
+
+    # the caller (wait_for_term_inst) waits to make sure the machines we marked for
+    # termination are no longer in the list
+
+ return break_loop, desired_size, instances_to_terminate
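+
+# Descriptive note: decrement_capacity is True only when no new instances are needed, so the
+# terminate call above shrinks the group's desired capacity in place rather than letting the
+# ASG launch replacements for the instances it just terminated.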
+
+
+def wait_for_term_inst(connection, term_instances):
+ wait_timeout = module.params.get('wait_timeout')
+ group_name = module.params.get('name')
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ count = 1
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and count > 0:
+ module.debug("waiting for instances to terminate")
+ count = 0
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ instance_facts = props['instance_facts']
+ instances = (i for i in instance_facts if i in term_instances)
+ for i in instances:
+ lifecycle = instance_facts[i]['lifecycle_state']
+ health = instance_facts[i]['health_status']
+ module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
+ if lifecycle.startswith('Terminating') or health == 'Unhealthy':
+ count += 1
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
+
+
+def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
+ # make sure we have the latest stats after that last loop.
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
+ # now we make sure that we have enough instances in a viable state
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and desired_size > props[prop]:
+ module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
+ time.sleep(10)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
+ module.debug("Reached %s: %s" % (prop, desired_size))
+ return props
+
+
+def asg_exists(connection):
+ group_name = module.params.get('name')
+ as_group = describe_autoscaling_groups(connection, group_name)
+ return bool(len(as_group))
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ load_balancers=dict(type='list', elements='str'),
+ target_group_arns=dict(type='list', elements='str'),
+ availability_zones=dict(type='list', elements='str'),
+ launch_config_name=dict(type='str'),
+ launch_template=dict(
+ type='dict',
+ default=None,
+ options=dict(
+ version=dict(type='str'),
+ launch_template_name=dict(type='str'),
+ launch_template_id=dict(type='str'),
+ )
+ ),
+ min_size=dict(type='int'),
+ max_size=dict(type='int'),
+ max_instance_lifetime=dict(type='int'),
+ mixed_instances_policy=dict(
+ type='dict',
+ default=None,
+ options=dict(
+ instance_types=dict(
+ type='list',
+ elements='str'
+ ),
+ )
+ ),
+ placement_group=dict(type='str'),
+ desired_capacity=dict(type='int'),
+ vpc_zone_identifier=dict(type='list', elements='str'),
+ replace_batch_size=dict(type='int', default=1),
+ replace_all_instances=dict(type='bool', default=False),
+ replace_instances=dict(type='list', default=[], elements='str'),
+ lc_check=dict(type='bool', default=True),
+ lt_check=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list', default=[], elements='dict'),
+ health_check_period=dict(type='int', default=300),
+ health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
+ default_cooldown=dict(type='int', default=300),
+ wait_for_instances=dict(type='bool', default=True),
+ termination_policies=dict(type='list', default='Default', elements='str'),
+ notification_topic=dict(type='str', default=None),
+ notification_types=dict(
+ type='list',
+ default=[
+ 'autoscaling:EC2_INSTANCE_LAUNCH',
+ 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
+ 'autoscaling:EC2_INSTANCE_TERMINATE',
+ 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
+ ],
+ elements='str'
+ ),
+ suspend_processes=dict(type='list', default=[], elements='str'),
+ metrics_collection=dict(type='bool', default=False),
+ metrics_granularity=dict(type='str', default='1Minute'),
+ metrics_list=dict(
+ type='list',
+ default=[
+ 'GroupMinSize',
+ 'GroupMaxSize',
+ 'GroupDesiredCapacity',
+ 'GroupInServiceInstances',
+ 'GroupPendingInstances',
+ 'GroupStandbyInstances',
+ 'GroupTerminatingInstances',
+ 'GroupTotalInstances'
+ ],
+ elements='str'
+ )
+ )
+
+ global module
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['replace_all_instances', 'replace_instances'],
+ ['launch_config_name', 'launch_template']
+ ]
+ )
+
+ if (
+ module.params.get('max_instance_lifetime') is not None
+ and not module.botocore_at_least('1.13.21')
+ ):
+ module.fail_json(
+ msg='Botocore needs to be version 1.13.21 or higher to use max_instance_lifetime.'
+ )
+
+ if (
+ module.params.get('mixed_instances_policy') is not None
+ and not module.botocore_at_least('1.12.45')
+ ):
+ module.fail_json(
+ msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.'
+ )
+
+ state = module.params.get('state')
+ replace_instances = module.params.get('replace_instances')
+ replace_all_instances = module.params.get('replace_all_instances')
+
+ connection = module.client('autoscaling')
+ changed = create_changed = replace_changed = False
+ exists = asg_exists(connection)
+
+ if state == 'present':
+ create_changed, asg_properties = create_autoscaling_group(connection)
+ elif state == 'absent':
+ changed = delete_autoscaling_group(connection)
+ module.exit_json(changed=changed)
+
+ # Only replace instances if asg existed at start of call
+ if (
+ exists
+ and (replace_all_instances or replace_instances)
+ and (module.params.get('launch_config_name') or module.params.get('launch_template'))
+ ):
+ replace_changed, asg_properties = replace(connection)
+
+ if create_changed or replace_changed:
+ changed = True
+
+ module.exit_json(changed=changed, **asg_properties)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_facts.py
new file mode 100644
index 00000000..07df4989
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_facts.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_info
+version_added: 1.0.0
+short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
+description:
+ - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
+ - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ name:
+ description:
+ - The prefix or name of the auto scaling group(s) you are searching for.
+ - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+ type: str
+ required: false
+ tags:
+ description:
+ - >
+ A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+ group(s) you are searching for.
+ required: false
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find all groups
+ community.aws.ec2_asg_info:
+ register: asgs
+
+- name: Find a group with matching name/prefix
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+
+- name: Find a group with matching tags
+ community.aws.ec2_asg_info:
+ tags:
+ project: webapp
+ env: production
+ register: asgs
+
+- name: Find a group with matching name/prefix and tags
+ community.aws.ec2_asg_info:
+ name: myproject
+ tags:
+ env: production
+ register: asgs
+
+- name: Fail if no groups are found
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length == 0 }}"
+
+- name: Fail if more than 1 group is found
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+ description: The Amazon Resource Name of the ASG
+ returned: success
+ type: str
+ sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+ description: Name of autoscaling group
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+availability_zones:
+ description: List of Availability Zones that are enabled for this ASG.
+ returned: success
+ type: list
+ sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
+created_time:
+ description: The date and time this ASG was created, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+health_check_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+health_check_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+instances:
+ description: List of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-es22ad25",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": "false"
+ }
+ ]
+launch_config_name:
+ description: >
+ Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+ provided for compatibility with ec2_asg module.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+launch_configuration_name:
+ description: Name of launch configuration associated with the ASG.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancer_names:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+new_instances_protected_from_scale_in:
+    description: Whether or not new instances are protected from automatic scale-in.
+ returned: success
+ type: bool
+ sample: "false"
+placement_group:
+ description: Placement group into which instances are launched, if any.
+ returned: success
+ type: str
+ sample: None
+status:
+ description: The current state of the group when DeleteAutoScalingGroup is in progress.
+ returned: success
+ type: str
+ sample: None
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+target_group_arns:
+ description: List of ARNs of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+ ]
+target_group_names:
+ description: List of names of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "target-group-host-hello",
+ "target-group-path-world"
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+    type: list
+ sample: ["Default"]
+'''
+
+import re
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_asg_tags(tags_to_match, asg):
+ for key, value in tags_to_match.items():
+ for tag in asg['Tags']:
+ if key == tag['Key'] and value == tag['Value']:
+ break
+ else:
+ return False
+ return True
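+
+# Illustrative example (hypothetical tags): match_asg_tags({'env': 'production'}, asg) returns True
+# only if every requested key/value pair appears in asg['Tags']; the for/else falls through to
+# 'return False' as soon as one wanted tag is missing from the group.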
+
+
+def find_asgs(conn, module, name=None, tags=None):
+ """
+ Args:
+        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+        module (AnsibleAWSModule): Initialized module object, used for error reporting.
+        name (str): Optional name of the ASG you are looking for.
+ tags (dict): Optional dictionary of tags and values to search for.
+
+ Basic Usage:
+ >>> name = 'public-webapp-production'
+ >>> tags = { 'env': 'production' }
+ >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+        >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+ Returns:
+ List
+ [
+ {
+ "auto_scaling_group_arn": (
+ "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+ "autoScalingGroupName/public-webapp-production"
+ ),
+ "auto_scaling_group_name": "public-webapp-production",
+ "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+ "created_time": "2016-02-02T23:28:42.481000+00:00",
+ "default_cooldown": 300,
+ "desired_capacity": 2,
+ "enabled_metrics": [],
+ "health_check_grace_period": 300,
+ "health_check_type": "ELB",
+ "instances":
+ [
+ {
+ "availability_zone": "us-west-2c",
+ "health_status": "Healthy",
+ "instance_id": "i-047a12cb",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ },
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-7a29df2c",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ }
+ ],
+ "launch_config_name": "public-webapp-production-1",
+ "launch_configuration_name": "public-webapp-production-1",
+ "load_balancer_names": ["public-webapp-production-lb"],
+ "max_size": 4,
+ "min_size": 2,
+ "new_instances_protected_from_scale_in": false,
+ "placement_group": None,
+ "status": None,
+ "suspended_processes": [],
+ "tags":
+ [
+ {
+ "key": "Name",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "public-webapp-production"
+ },
+ {
+ "key": "env",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "production"
+ }
+ ],
+ "target_group_names": [],
+ "target_group_arns": [],
+ "termination_policies":
+ [
+ "Default"
+ ],
+ "vpc_zone_identifier":
+ [
+ "subnet-a1b1c1d1",
+ "subnet-a2b2c2d2",
+ "subnet-a3b3c3d3"
+ ]
+ }
+ ]
+ """
+
+ try:
+ asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
+ asgs = asgs_paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
+
+ if not asgs:
+ return asgs
+
+ try:
+ elbv2 = module.client('elbv2')
+    except ClientError:
+ # This is nice to have, not essential
+ elbv2 = None
+ matched_asgs = []
+
+ if name is not None:
+        # anchor the requested name/prefix match at the start of the group name
+ name_prog = re.compile(r'^' + name)
+
+ for asg in asgs['AutoScalingGroups']:
+ if name:
+ matched_name = name_prog.search(asg['AutoScalingGroupName'])
+ else:
+ matched_name = True
+
+ if tags:
+ matched_tags = match_asg_tags(tags, asg)
+ else:
+ matched_tags = True
+
+ if matched_name and matched_tags:
+ asg = camel_dict_to_snake_dict(asg)
+ # compatibility with ec2_asg module
+ if 'launch_configuration_name' in asg:
+ asg['launch_config_name'] = asg['launch_configuration_name']
+ # workaround for https://github.com/ansible/ansible/pull/25015
+ if 'target_group_ar_ns' in asg:
+ asg['target_group_arns'] = asg['target_group_ar_ns']
+                del asg['target_group_ar_ns']
+ if asg.get('target_group_arns'):
+ if elbv2:
+ try:
+ tg_paginator = elbv2.get_paginator('describe_target_groups')
+ tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
+ asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']]
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'TargetGroupNotFound':
+ asg['target_group_names'] = []
+ else:
+ module.fail_json_aws(e, msg="Failed to describe Target Groups")
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed to describe Target Groups")
+ else:
+ asg['target_group_names'] = []
+ matched_asgs.append(asg)
+
+ return matched_asgs
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'ec2_asg_facts':
+ module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", date='2021-12-01', collection_name='community.aws')
+
+ asg_name = module.params.get('name')
+ asg_tags = module.params.get('tags')
+
+ autoscaling = module.client('autoscaling')
+
+ results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_info.py
new file mode 100644
index 00000000..07df4989
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_info.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_info
+version_added: 1.0.0
+short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
+description:
+ - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
+ - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ name:
+ description:
+ - The prefix or name of the auto scaling group(s) you are searching for.
+ - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+ type: str
+ required: false
+ tags:
+ description:
+ - >
+ A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+ group(s) you are searching for.
+ required: false
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find all groups
+ community.aws.ec2_asg_info:
+ register: asgs
+
+- name: Find a group with matching name/prefix
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+
+- name: Find a group with matching tags
+ community.aws.ec2_asg_info:
+ tags:
+ project: webapp
+ env: production
+ register: asgs
+
+- name: Find a group with matching name/prefix and tags
+ community.aws.ec2_asg_info:
+ name: myproject
+ tags:
+ env: production
+ register: asgs
+
+- name: Fail if no groups are found
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length == 0 }}"
+
+- name: Fail if more than 1 group is found
+ community.aws.ec2_asg_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+ description: The Amazon Resource Name of the ASG
+ returned: success
+ type: str
+ sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+ description: Name of autoscaling group
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+availability_zones:
+ description: List of Availability Zones that are enabled for this ASG.
+ returned: success
+ type: list
+ sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
+created_time:
+ description: The date and time this ASG was created, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+health_check_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+health_check_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+instances:
+ description: List of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-es22ad25",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": "false"
+ }
+ ]
+launch_config_name:
+ description: >
+ Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+ provided for compatibility with ec2_asg module.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+launch_configuration_name:
+ description: Name of launch configuration associated with the ASG.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancer_names:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+new_instances_protected_from_scale_in:
+    description: Whether or not new instances are protected from automatic scale-in.
+ returned: success
+ type: bool
+ sample: "false"
+placement_group:
+ description: Placement group into which instances are launched, if any.
+ returned: success
+ type: str
+ sample: None
+status:
+ description: The current state of the group when DeleteAutoScalingGroup is in progress.
+ returned: success
+ type: str
+ sample: None
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+target_group_arns:
+ description: List of ARNs of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+ ]
+target_group_names:
+ description: List of names of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "target-group-host-hello",
+ "target-group-path-world"
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+  type: list
+ sample: ["Default"]
+'''
+
+import re
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_asg_tags(tags_to_match, asg):
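+    # Every requested tag must be present on the ASG with a matching value.
+    # The for/else below uses Python's loop-else: the else branch runs only
+    # when the inner loop finishes without a break, i.e. the tag was not found.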
+ for key, value in tags_to_match.items():
+ for tag in asg['Tags']:
+ if key == tag['Key'] and value == tag['Value']:
+ break
+ else:
+ return False
+ return True
+
+
+def find_asgs(conn, module, name=None, tags=None):
+ """
+    Args:
+        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+        module (AnsibleAWSModule): Initialized Ansible module, used for error reporting.
+        name (str): Optional name of the ASG you are looking for.
+        tags (dict): Optional dictionary of tags and values to search for.
+
+ Basic Usage:
+        >>> name = 'public-webapp-production'
+        >>> tags = { 'env': 'production' }
+        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+        >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+ Returns:
+ List
+ [
+ {
+ "auto_scaling_group_arn": (
+ "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+ "autoScalingGroupName/public-webapp-production"
+ ),
+ "auto_scaling_group_name": "public-webapp-production",
+ "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+ "created_time": "2016-02-02T23:28:42.481000+00:00",
+ "default_cooldown": 300,
+ "desired_capacity": 2,
+ "enabled_metrics": [],
+ "health_check_grace_period": 300,
+ "health_check_type": "ELB",
+ "instances":
+ [
+ {
+ "availability_zone": "us-west-2c",
+ "health_status": "Healthy",
+ "instance_id": "i-047a12cb",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ },
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-7a29df2c",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ }
+ ],
+ "launch_config_name": "public-webapp-production-1",
+ "launch_configuration_name": "public-webapp-production-1",
+ "load_balancer_names": ["public-webapp-production-lb"],
+ "max_size": 4,
+ "min_size": 2,
+ "new_instances_protected_from_scale_in": false,
+ "placement_group": None,
+ "status": None,
+ "suspended_processes": [],
+ "tags":
+ [
+ {
+ "key": "Name",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "public-webapp-production"
+ },
+ {
+ "key": "env",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "production"
+ }
+ ],
+ "target_group_names": [],
+ "target_group_arns": [],
+ "termination_policies":
+ [
+ "Default"
+ ],
+ "vpc_zone_identifier":
+ [
+ "subnet-a1b1c1d1",
+ "subnet-a2b2c2d2",
+ "subnet-a3b3c3d3"
+ ]
+ }
+ ]
+ """
+
+ try:
+ asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
+ asgs = asgs_paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
+
+ if not asgs:
+ return asgs
+
+ try:
+ elbv2 = module.client('elbv2')
+ except ClientError as e:
+ # This is nice to have, not essential
+ elbv2 = None
+ matched_asgs = []
+
+    if name is not None:
+        # the name is matched as a prefix, so anchor the regex at the start of the string
+        name_prog = re.compile(r'^' + name)
+
+ for asg in asgs['AutoScalingGroups']:
+ if name:
+ matched_name = name_prog.search(asg['AutoScalingGroupName'])
+ else:
+ matched_name = True
+
+ if tags:
+ matched_tags = match_asg_tags(tags, asg)
+ else:
+ matched_tags = True
+
+ if matched_name and matched_tags:
+ asg = camel_dict_to_snake_dict(asg)
+ # compatibility with ec2_asg module
+ if 'launch_configuration_name' in asg:
+ asg['launch_config_name'] = asg['launch_configuration_name']
+ # workaround for https://github.com/ansible/ansible/pull/25015
+ if 'target_group_ar_ns' in asg:
+ asg['target_group_arns'] = asg['target_group_ar_ns']
+                del asg['target_group_ar_ns']
+ if asg.get('target_group_arns'):
+ if elbv2:
+ try:
+ tg_paginator = elbv2.get_paginator('describe_target_groups')
+ tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
+ asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']]
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'TargetGroupNotFound':
+ asg['target_group_names'] = []
+ else:
+ module.fail_json_aws(e, msg="Failed to describe Target Groups")
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed to describe Target Groups")
+ else:
+ asg['target_group_names'] = []
+ matched_asgs.append(asg)
+
+ return matched_asgs
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'ec2_asg_facts':
+ module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", date='2021-12-01', collection_name='community.aws')
+
+ asg_name = module.params.get('name')
+ asg_tags = module.params.get('tags')
+
+ autoscaling = module.client('autoscaling')
+
+ results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_lifecycle_hook.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_lifecycle_hook.py
new file mode 100644
index 00000000..bab1ef37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_asg_lifecycle_hook.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_lifecycle_hook
+version_added: 1.0.0
+short_description: Create, delete or update AWS ASG Lifecycle Hooks
+description:
+  - Creates a new lifecycle hook when I(state=present) and the specified hook is not found.
+  - Updates an existing hook when I(state=present), the hook is found, and the current and provided parameters differ.
+  - Deletes the hook when I(state=absent) and the specified hook is found.
+author: Igor 'Tsigankov' Eyrich (@tsiganenok) <tsiganenok@gmail.com>
+options:
+ state:
+ description:
+ - Create or delete Lifecycle Hook.
+ - When I(state=present) updates existing hook or creates a new hook if not found.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ lifecycle_hook_name:
+ description:
+ - The name of the lifecycle hook.
+ required: true
+ type: str
+ autoscaling_group_name:
+ description:
+ - The name of the Auto Scaling group to which you want to assign the lifecycle hook.
+ required: true
+ type: str
+ transition:
+ description:
+ - The instance state to which you want to attach the lifecycle hook.
+ - Required when I(state=present).
+ choices: ['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']
+ type: str
+ role_arn:
+ description:
+ - The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
+ type: str
+ notification_target_arn:
+ description:
+ - The ARN of the notification target that Auto Scaling will use to notify you when an
+ instance is in the transition state for the lifecycle hook.
+ - This target can be either an SQS queue or an SNS topic.
+ - If you specify an empty string, this overrides the current ARN.
+ type: str
+ notification_meta_data:
+ description:
+ - Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.
+ type: str
+ heartbeat_timeout:
+ description:
+      - The amount of time, in seconds, that can elapse before the lifecycle hook times out.
+        When the lifecycle hook times out, Auto Scaling performs the default action.
+        You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
+      - By default Amazon AWS will use 3600 seconds (1 hour).
+ type: int
+ default_result:
+ description:
+ - Defines the action the Auto Scaling group should take when the lifecycle hook timeout
+ elapses or if an unexpected failure occurs.
+ choices: ['ABANDON', 'CONTINUE']
+ default: ABANDON
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3>=1.4.4 ]
+
+'''
+
+EXAMPLES = '''
+- name: Create / Update lifecycle hook
+ community.aws.ec2_asg_lifecycle_hook:
+ region: eu-central-1
+ state: present
+ autoscaling_group_name: example
+ lifecycle_hook_name: example
+ transition: autoscaling:EC2_INSTANCE_LAUNCHING
+ heartbeat_timeout: 7000
+ default_result: ABANDON
+
+- name: Delete lifecycle hook
+ community.aws.ec2_asg_lifecycle_hook:
+ region: eu-central-1
+ state: absent
+ autoscaling_group_name: example
+ lifecycle_hook_name: example
+
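+# A sketch of wiring a hook to an SNS notification target; the ARNs below are
+# placeholders, substitute your own SNS topic and IAM role.
+- name: Create lifecycle hook with an SNS notification target
+  community.aws.ec2_asg_lifecycle_hook:
+    region: eu-central-1
+    state: present
+    autoscaling_group_name: example
+    lifecycle_hook_name: example-notify
+    transition: autoscaling:EC2_INSTANCE_TERMINATING
+    notification_target_arn: arn:aws:sns:eu-central-1:123456789012:example-topic
+    role_arn: arn:aws:iam::123456789012:role/example-asg-notification-role
+    default_result: CONTINUE
+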
+'''
+
+RETURN = '''
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def create_lifecycle_hook(connection, module):
+ changed = False
+
+ lch_name = module.params.get('lifecycle_hook_name')
+ asg_name = module.params.get('autoscaling_group_name')
+ transition = module.params.get('transition')
+ role_arn = module.params.get('role_arn')
+ notification_target_arn = module.params.get('notification_target_arn')
+ notification_meta_data = module.params.get('notification_meta_data')
+ heartbeat_timeout = module.params.get('heartbeat_timeout')
+ default_result = module.params.get('default_result')
+
+ lch_params = {
+ 'LifecycleHookName': lch_name,
+ 'AutoScalingGroupName': asg_name,
+ 'LifecycleTransition': transition
+ }
+
+ if role_arn:
+ lch_params['RoleARN'] = role_arn
+
+ if notification_target_arn:
+ lch_params['NotificationTargetARN'] = notification_target_arn
+
+ if notification_meta_data:
+ lch_params['NotificationMetadata'] = notification_meta_data
+
+ if heartbeat_timeout:
+ lch_params['HeartbeatTimeout'] = heartbeat_timeout
+
+ if default_result:
+ lch_params['DefaultResult'] = default_result
+
+ try:
+ existing_hook = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name,
+ LifecycleHookNames=[lch_name]
+ )['LifecycleHooks']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get Lifecycle Hook")
+
+ if not existing_hook:
+ changed = True
+ else:
+ # GlobalTimeout is not configurable, but exists in response.
+ # Removing it helps to compare both dicts in order to understand
+ # what changes were done.
+        del existing_hook[0]['GlobalTimeout']
+ added, removed, modified, same = dict_compare(lch_params, existing_hook[0])
+ if added or removed or modified:
+ changed = True
+
+ if changed:
+ try:
+ connection.put_lifecycle_hook(**lch_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create LifecycleHook")
+
+    return changed
+
+
+def dict_compare(d1, d2):
+ d1_keys = set(d1.keys())
+ d2_keys = set(d2.keys())
+ intersect_keys = d1_keys.intersection(d2_keys)
+ added = d1_keys - d2_keys
+ removed = d2_keys - d1_keys
+ modified = False
+    for key in intersect_keys:
+        if d1[key] != d2[key]:
+            modified = True
+            break
+
+ same = set(o for o in intersect_keys if d1[o] == d2[o])
+ return added, removed, modified, same
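+
+# A hypothetical illustration of dict_compare's return values:
+#   dict_compare({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
+#   -> added={'a'}, removed={'c'}, modified=True, same=set()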
+
+
+def delete_lifecycle_hook(connection, module):
+ changed = False
+
+ lch_name = module.params.get('lifecycle_hook_name')
+ asg_name = module.params.get('autoscaling_group_name')
+
+ try:
+ all_hooks = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks")
+
+ for hook in all_hooks['LifecycleHooks']:
+ if hook['LifecycleHookName'] == lch_name:
+ lch_params = {
+ 'LifecycleHookName': lch_name,
+ 'AutoScalingGroupName': asg_name
+ }
+
+ try:
+ connection.delete_lifecycle_hook(**lch_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete LifecycleHook")
+
+    return changed
+
+
+def main():
+ argument_spec = dict(
+ autoscaling_group_name=dict(required=True, type='str'),
+ lifecycle_hook_name=dict(required=True, type='str'),
+ transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']),
+ role_arn=dict(type='str'),
+ notification_target_arn=dict(type='str'),
+ notification_meta_data=dict(type='str'),
+ heartbeat_timeout=dict(type='int'),
+ default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['transition']]])
+ state = module.params.get('state')
+
+ connection = module.client('autoscaling')
+
+ changed = False
+
+ if state == 'present':
+ changed = create_lifecycle_hook(connection, module)
+ elif state == 'absent':
+ changed = delete_lifecycle_hook(connection, module)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
new file mode 100644
index 00000000..1e9fc1de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_customer_gateway
+version_added: 1.0.0
+short_description: Manage an AWS customer gateway
+description:
+ - Manage an AWS customer gateway.
+author: Michael Baydoun (@MichaelBaydoun)
+requirements: [ botocore, boto3 ]
+notes:
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
+ first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
+ requests do not create new customer gateway resources.
+ - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+ customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+options:
+ bgp_asn:
+ description:
+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).
+ type: int
+ ip_address:
+ description:
+      - Internet-routable IP address of the customer gateway; must be a static address.
+ required: true
+ type: str
+ name:
+ description:
+ - Name of the customer gateway.
+ required: true
+ type: str
+ routing:
+ description:
+ - The type of routing.
+ choices: ['static', 'dynamic']
+ default: dynamic
+ type: str
+ state:
+ description:
+ - Create or terminate the Customer Gateway.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create Customer Gateway
+ community.aws.ec2_customer_gateway:
+ bgp_asn: 12345
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ region: us-east-1
+ register: cgw
+
+- name: Delete Customer Gateway
+ community.aws.ec2_customer_gateway:
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ state: absent
+ region: us-east-1
+ register: cgw
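+
+# A sketch using static routing, for gateway devices that do not support BGP;
+# the values are placeholders.
+- name: Create Customer Gateway with static routing
+  community.aws.ec2_customer_gateway:
+    ip_address: 1.2.3.4
+    name: IndianapolisOffice
+    routing: static
+    region: us-east-1
+  register: cgw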
+'''
+
+RETURN = '''
+gateway.customer_gateways:
+ description: details about the gateway that was created.
+ returned: success
+ type: complex
+ contains:
+ bgp_asn:
+      description: The Border Gateway Protocol (BGP) Autonomous System Number (ASN).
+ returned: when exists and gateway is available.
+ sample: 65123
+ type: str
+ customer_gateway_id:
+      description: Gateway ID assigned by Amazon.
+ returned: when exists and gateway is available.
+ sample: cgw-cb6386a2
+ type: str
+ ip_address:
+      description: IP address of your gateway device.
+ returned: when exists and gateway is available.
+ sample: 1.2.3.4
+ type: str
+ state:
+      description: State of the gateway.
+ returned: when gateway exists and is available.
+ sample: available
+ type: str
+ tags:
+ description: Any tags on the gateway.
+ returned: when gateway exists and is available, and when tags exist.
+ type: list
+ type:
+      description: Encryption type.
+ returned: when gateway exists and is available.
+ sample: ipsec.1
+ type: str
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class Ec2CustomerGatewayManager:
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.ec2 = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
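+    # Deletion is retried with jittered exponential backoff; 'IncorrectState'
+    # is treated as retryable because the gateway can transiently reject
+    # deletion while it is changing state.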
+ @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])
+ def ensure_cgw_absent(self, gw_id):
+ response = self.ec2.delete_customer_gateway(
+ DryRun=False,
+ CustomerGatewayId=gw_id
+ )
+ return response
+
+ def ensure_cgw_present(self, bgp_asn, ip_address):
+ if not bgp_asn:
+ bgp_asn = 65000
+ response = self.ec2.create_customer_gateway(
+ DryRun=False,
+ Type='ipsec.1',
+ PublicIp=ip_address,
+ BgpAsn=bgp_asn,
+ )
+ return response
+
+ def tag_cgw_name(self, gw_id, name):
+ response = self.ec2.create_tags(
+ DryRun=False,
+ Resources=[
+ gw_id,
+ ],
+ Tags=[
+ {
+ 'Key': 'Name',
+ 'Value': name
+ },
+ ]
+ )
+ return response
+
+ def describe_gateways(self, ip_address):
+ response = self.ec2.describe_customer_gateways(
+ DryRun=False,
+ Filters=[
+ {
+ 'Name': 'state',
+ 'Values': [
+ 'available',
+ ]
+ },
+ {
+ 'Name': 'ip-address',
+ 'Values': [
+ ip_address,
+ ]
+ }
+ ]
+ )
+ return response
+
+
+def main():
+ argument_spec = dict(
+ bgp_asn=dict(required=False, type='int'),
+ ip_address=dict(required=True),
+ name=dict(required=True),
+ routing=dict(default='dynamic', choices=['dynamic', 'static']),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('routing', 'dynamic', ['bgp_asn'])
+ ]
+ )
+
+ gw_mgr = Ec2CustomerGatewayManager(module)
+
+ name = module.params.get('name')
+
+ existing = gw_mgr.describe_gateways(module.params['ip_address'])
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing['CustomerGateways']:
+ existing['CustomerGateway'] = existing['CustomerGateways'][0]
+ results['gateway'] = existing
+ if existing['CustomerGateway']['Tags']:
+ tag_array = existing['CustomerGateway']['Tags']
+                for tag in tag_array:
+                    if tag['Key'] == 'Name':
+                        current_name = tag['Value']
+ if current_name != name:
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway']['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+ else:
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_present(
+ module.params['bgp_asn'],
+ module.params['ip_address'],
+ )
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway']['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if existing['CustomerGateways']:
+ existing['CustomerGateway'] = existing['CustomerGateways'][0]
+ results['gateway'] = existing
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_absent(
+ existing['CustomerGateway']['CustomerGatewayId']
+ )
+ results['changed'] = True
+
+ pretty_results = camel_dict_to_snake_dict(results)
+ module.exit_json(**pretty_results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_facts.py
new file mode 100644
index 00000000..12c6320e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_facts.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_customer_gateway_info
+version_added: 1.0.0
+short_description: Gather information about customer gateways in AWS
+description:
+ - Gather information about customer gateways in AWS.
+ - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
+ type: dict
+ customer_gateway_ids:
+ description:
+      - Get details of specific customer gateways using customer gateway IDs. This value should be provided as a list.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all customer gateways
+ community.aws.ec2_customer_gateway_info:
+
+- name: Gather information about a filtered list of customer gateways, based on tags
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ filters:
+ "tag:Name": test-customer-gateway
+ "tag:AltName": test-customer-gateway-alt
+ register: cust_gw_info
+
+- name: Gather information about a specific customer gateway by specifying customer gateway ID
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ customer_gateway_ids:
+ - 'cgw-48841a09'
+ - 'cgw-fec021ce'
+ register: cust_gw_info
+'''
+
+RETURN = r'''
+customer_gateways:
+ description: List of one or more customer gateways.
+ returned: always
+ type: list
+ sample: [
+ {
+ "bgp_asn": "65000",
+ "customer_gateway_id": "cgw-fec844ce",
+ "customer_gateway_name": "test-customer-gw",
+ "ip_address": "110.112.113.120",
+ "state": "available",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "test-customer-gw"
+ }
+ ],
+ "type": "ipsec.1"
+ }
+ ]
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_customer_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
+ if snaked_customer_gateways:
+ for customer_gateway in snaked_customer_gateways:
+ customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
+ customer_gateway_name = customer_gateway['tags'].get('Name')
+ if customer_gateway_name:
+ customer_gateway['customer_gateway_name'] = customer_gateway_name
+ module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
+
+
+def main():
+
+ argument_spec = dict(
+ customer_gateway_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['customer_gateway_ids', 'filters']],
+ supports_check_mode=True)
+ if module._module._name == 'ec2_customer_gateway_facts':
+ module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ list_customer_gateways(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
new file mode 100644
index 00000000..12c6320e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_customer_gateway_info
+version_added: 1.0.0
+short_description: Gather information about customer gateways in AWS
+description:
+ - Gather information about customer gateways in AWS.
+ - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
+ type: dict
+ customer_gateway_ids:
+ description:
+      - Get details of specific customer gateways using customer gateway IDs. This value should be provided as a list.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all customer gateways
+ community.aws.ec2_customer_gateway_info:
+
+- name: Gather information about a filtered list of customer gateways, based on tags
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ filters:
+ "tag:Name": test-customer-gateway
+ "tag:AltName": test-customer-gateway-alt
+ register: cust_gw_info
+
+- name: Gather information about a specific customer gateway by specifying customer gateway ID
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ customer_gateway_ids:
+ - 'cgw-48841a09'
+ - 'cgw-fec021ce'
+ register: cust_gw_info
+'''
+
+RETURN = r'''
+customer_gateways:
+ description: List of one or more customer gateways.
+ returned: always
+ type: list
+ sample: [
+ {
+ "bgp_asn": "65000",
+ "customer_gateway_id": "cgw-fec844ce",
+ "customer_gateway_name": "test-customer-gw",
+ "ip_address": "110.112.113.120",
+ "state": "available",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "test-customer-gw"
+ }
+ ],
+ "type": "ipsec.1"
+ }
+ ]
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_customer_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
+ if snaked_customer_gateways:
+ for customer_gateway in snaked_customer_gateways:
+ customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
+ customer_gateway_name = customer_gateway['tags'].get('Name')
+ if customer_gateway_name:
+ customer_gateway['customer_gateway_name'] = customer_gateway_name
+ module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
+
+
+def main():
+
+ argument_spec = dict(
+ customer_gateway_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['customer_gateway_ids', 'filters']],
+ supports_check_mode=True)
+ if module._module._name == 'ec2_customer_gateway_facts':
+ module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ list_customer_gateways(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip.py
new file mode 100644
index 00000000..6aa2a531
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip.py
@@ -0,0 +1,638 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip
+version_added: 1.0.0
+short_description: Manages EC2 Elastic IP (EIP) addresses
+description:
+ - This module can allocate or release an EIP.
+ - This module can associate/disassociate an EIP with instances or network interfaces.
+options:
+ device_id:
+ description:
+ - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
+ required: false
+ aliases: [ instance_id ]
+ type: str
+ public_ip:
+ description:
+ - The IP address of a previously allocated EIP.
+      - When I(state=present) and device is specified, the EIP is associated with the device.
+      - When I(state=absent) and device is specified, the EIP is disassociated from the device.
+ aliases: [ ip ]
+ type: str
+ state:
+ description:
+ - When C(state=present), allocate an EIP or associate an existing EIP with a device.
+ - When C(state=absent), disassociate the EIP from the device and optionally release it.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ in_vpc:
+ description:
+ - Allocate an EIP inside a VPC or not.
+ - Required if specifying an ENI with I(device_id).
+ default: false
+ type: bool
+ reuse_existing_ip_allowed:
+ description:
+ - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
+ default: false
+ type: bool
+ release_on_disassociation:
+ description:
+ - Whether or not to automatically release the EIP when it is disassociated.
+ default: false
+ type: bool
+ private_ip_address:
+ description:
+ - The primary or secondary private IP address to associate with the Elastic IP address.
+ type: str
+ allow_reassociation:
+ description:
+ - Specify this option to allow an Elastic IP address that is already associated with another
+ network interface or instance to be re-associated with the specified instance or interface.
+ default: false
+ type: bool
+ tag_name:
+ description:
+ - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse
+ an Elastic IP if it is tagged with I(tag_name).
+ type: str
+ tag_value:
+ description:
+ - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value).
+ type: str
+ public_ipv4_pool:
+ description:
+      - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP).
+      - Only applies to newly allocated Elastic IPs; it is not validated when I(reuse_existing_ip_allowed=true).
+ type: str
+ wait_timeout:
+ description:
+      - The I(wait_timeout) option does nothing and will be removed after 2022-06-01.
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
+notes:
+ - There may be a delay between the time the EIP is assigned and when
+ the cloud instance is reachable via the new address. Use wait_for and
+ pause to delay further playbook execution until the instance is reachable,
+ if necessary.
+ - This module returns multiple changed statuses on disassociation or release.
+ It returns an overall status based on any changes occurring. It also returns
+ individual changed statuses for disassociation and release.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: associate an elastic IP with an instance
+ community.aws.ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+
+- name: associate an elastic IP with a device
+ community.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+
+- name: associate an elastic IP with a device and allow reassociation
+ community.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ public_ip: 93.184.216.119
+ allow_reassociation: true
+
+- name: disassociate an elastic IP from an instance
+ community.aws.ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+ state: absent
+
+- name: disassociate an elastic IP with a device
+ community.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+ state: absent
+
+- name: allocate a new elastic IP and associate it with an instance
+ community.aws.ec2_eip:
+ device_id: i-1212f003
+
+- name: allocate a new elastic IP without associating it to anything
+ community.aws.ec2_eip:
+ state: present
+ register: eip
+
+- name: output the IP
+ ansible.builtin.debug:
+ msg: "Allocated IP is {{ eip.public_ip }}"
+
+- name: provision new instances with ec2
+ amazon.aws.ec2:
+ keypair: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: true
+ group: webserver
+ count: 3
+ register: ec2
+
+- name: associate new elastic IPs with each of the instances
+ community.aws.ec2_eip:
+ device_id: "{{ item }}"
+ loop: "{{ ec2.instance_ids }}"
+
+- name: allocate a new elastic IP inside a VPC in us-west-2
+ community.aws.ec2_eip:
+ region: us-west-2
+ in_vpc: true
+ register: eip
+
+- name: output the IP
+ ansible.builtin.debug:
+ msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
+
+- name: allocate eip - reuse unallocated ips (if found) with FREE tag
+ community.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: FREE
+
+- name: allocate eip - reuse unallocated ips if tag reserved is nope
+ community.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: reserved
+ tag_value: nope
+
+- name: allocate new eip - from servers given ipv4 pool
+ community.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
+
+- name: allocate eip from a given pool (when no free addresses tagged dev-servers are available)
+ community.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: dev-servers
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
+
+- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname
+ community.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: reserved_for
+ tag_value: "{{ inventory_hostname }}"
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
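+
+# Combines disassociation with release_on_disassociation so the EIP is
+# returned to the pool once it is detached from the instance.
+- name: disassociate an elastic IP from an instance and release it
+  community.aws.ec2_eip:
+    device_id: i-1212f003
+    ip: 93.184.216.119
+    release_on_disassociation: true
+    state: absent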
+'''
+
+RETURN = '''
+allocation_id:
+ description: allocation_id of the elastic ip
+ returned: on success
+ type: str
+ sample: eipalloc-51aa3a6c
+public_ip:
+ description: an elastic ip address
+ returned: on success
+ type: str
+ sample: 52.88.159.209
+'''
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True):
+ if address_is_associated_with_device(ec2, module, address, device_id, is_instance):
+ return {'changed': False}
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ if is_instance:
+ try:
+ params = dict(
+ InstanceId=device_id,
+ AllowReassociation=allow_reassociation,
+ )
+ if private_ip_address:
+                    params['PrivateIpAddress'] = private_ip_address
+ if address['Domain'] == 'vpc':
+ params['AllocationId'] = address['AllocationId']
+ else:
+ params['PublicIp'] = address['PublicIp']
+ res = ec2.associate_address(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id)
+ module.fail_json_aws(e, msg=msg)
+ else:
+ params = dict(
+ NetworkInterfaceId=device_id,
+ AllocationId=address['AllocationId'],
+ AllowReassociation=allow_reassociation,
+ )
+
+ if private_ip_address:
+ params['PrivateIpAddress'] = private_ip_address
+
+ try:
+ res = ec2.associate_address(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id)
+ module.fail_json_aws(e, msg=msg)
+            if not res:
+                module.fail_json(msg='Association failed.')
+
+ return {'changed': True}
+
+
+def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
+ if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
+ return {'changed': False}
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ try:
+ if address['Domain'] == 'vpc':
+ res = ec2.disassociate_address(
+ AssociationId=address['AssociationId'], aws_retry=True
+ )
+ else:
+ res = ec2.disassociate_address(
+ PublicIp=address['PublicIp'], aws_retry=True
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Dissassociation of Elastic IP failed")
+
+ return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def find_address(ec2, module, public_ip, device_id, is_instance=True):
+ """ Find an existing Elastic IP address """
+ filters = []
+ kwargs = {}
+
+ if public_ip:
+ kwargs["PublicIps"] = [public_ip]
+ elif device_id:
+ if is_instance:
+ filters.append({"Name": 'instance-id', "Values": [device_id]})
+ else:
+ filters.append({'Name': 'network-interface-id', "Values": [device_id]})
+
+ if len(filters) > 0:
+ kwargs["Filters"] = filters
+ elif len(filters) == 0 and public_ip is None:
+ return None
+
+ try:
+ addresses = ec2.describe_addresses(**kwargs)
+ except is_boto3_error_code('InvalidAddress.NotFound') as e:
+ # If we're releasing and we can't find it, it's already gone...
+ if module.params.get('state') == 'absent':
+ module.exit_json(changed=False)
+ module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+ addresses = addresses["Addresses"]
+ if len(addresses) == 1:
+ return addresses[0]
+ elif len(addresses) > 1:
+        msg = "Found more than one address using args {0}. ".format(kwargs)
+        msg += "Addresses found: {0}".format(addresses)
+        module.fail_json(msg=msg)
+
+
+def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True):
+ """ Check if the elastic IP is currently associated with the device """
+ address = find_address(ec2, module, address["PublicIp"], device_id, is_instance)
+ if address:
+ if is_instance:
+ if "InstanceId" in address and address["InstanceId"] == device_id:
+ return address
+ else:
+ if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id:
+ return address
+ return False
+
+
+def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None):
+ """ Allocate a new elastic IP address (when needed) and return it """
+ if reuse_existing_ip_allowed:
+ filters = []
+ if not domain:
+ domain = 'standard'
+ filters.append({'Name': 'domain', "Values": [domain]})
+
+ if tag_dict is not None:
+ filters += ansible_dict_to_boto3_filter_list(tag_dict)
+
+ try:
+ all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+ all_addresses = all_addresses["Addresses"]
+
+ if domain == 'vpc':
+ unassociated_addresses = [a for a in all_addresses
+ if not a.get('AssociationId', None)]
+ else:
+ unassociated_addresses = [a for a in all_addresses
+                                      if not a.get('InstanceId')]
+ if unassociated_addresses:
+ return unassociated_addresses[0], False
+
+ if public_ipv4_pool:
+ return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True
+
+ try:
+ result = ec2.allocate_address(Domain=domain, aws_retry=True), True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+ return result
+
+
+def release_address(ec2, module, address, check_mode):
+ """ Release a previously allocated elastic IP address """
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ try:
+ result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't release Elastic IP address")
+
+ return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def describe_eni_with_backoff(ec2, module, device_id):
+ try:
+ return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id])
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e:
+ module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+
+
+def find_device(ec2, module, device_id, is_instance=True):
+ """ Attempt to find the EC2 instance and return it """
+
+ if is_instance:
+ try:
+ paginator = ec2.get_paginator('describe_instances')
+ reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]'))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get list of instances")
+
+ if len(reservations) == 1:
+ instances = reservations[0]['Instances']
+ if len(instances) == 1:
+ return instances[0]
+ else:
+ try:
+ interfaces = describe_eni_with_backoff(ec2, module, device_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+        if len(interfaces['NetworkInterfaces']) == 1:
+            return interfaces['NetworkInterfaces'][0]
+
+
+def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
+ reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True):
+ changed = False
+
+    # Allocate a new EIP when no existing address was found
+ if not address:
+ if check_mode:
+ return {'changed': True}
+
+ address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode)
+
+ if device_id:
+        # Work out whether the device is an instance or an ENI
+ if is_instance:
+ instance = find_device(ec2, module, device_id)
+ if reuse_existing_ip_allowed:
+                if instance.get('VpcId') and domain is None:
+                    msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc"
+                    module.fail_json(msg=msg)
+
+ # Associate address object (provided or allocated) with instance
+ assoc_result = associate_ip_and_device(
+ ec2, module, address, private_ip_address, device_id, allow_reassociation,
+ check_mode
+ )
+ else:
+ instance = find_device(ec2, module, device_id, is_instance=False)
+ # Associate address object (provided or allocated) with instance
+ assoc_result = associate_ip_and_device(
+ ec2, module, address, private_ip_address, device_id, allow_reassociation,
+ check_mode, is_instance=False
+ )
+
+ changed = changed or assoc_result['changed']
+
+ return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']}
+
+
+def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True):
+ if not address:
+ return {'changed': False}
+
+ # disassociating address from instance
+ if device_id:
+ if is_instance:
+ return disassociate_ip_and_device(
+ ec2, module, address, device_id, check_mode
+ )
+ else:
+ return disassociate_ip_and_device(
+ ec2, module, address, device_id, check_mode, is_instance=False
+ )
+ # releasing address
+ else:
+ return release_address(ec2, module, address, check_mode)
+
+
+def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool):
+    # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address
+ """ Overrides boto's allocate_address function to support BYOIP """
+ params = {}
+
+ if domain is not None:
+ params['Domain'] = domain
+
+ if public_ipv4_pool is not None:
+ params['PublicIpv4Pool'] = public_ipv4_pool
+
+ if check_mode:
+        params['DryRun'] = True
+
+ try:
+ result = ec2.allocate_address(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+ return result
+
+
+def generate_tag_dict(module, tag_name, tag_value):
+ # type: (AnsibleAWSModule, str, str) -> Optional[Dict]
+ """ Generates a dictionary to be passed as a filter to Amazon """
+    if tag_name and not tag_value:
+        if tag_name.startswith('tag:'):
+            tag_name = tag_name[len('tag:'):]
+        return {'tag-key': tag_name}
+
+ elif tag_name and tag_value:
+ if not tag_name.startswith('tag:'):
+ tag_name = 'tag:' + tag_name
+ return {tag_name: tag_value}
+
+ elif tag_value and not tag_name:
+ module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')")
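+# Illustrative mappings (hypothetical inputs):
+#   generate_tag_dict(module, 'FREE', None)        -> {'tag-key': 'FREE'}
+#   generate_tag_dict(module, 'reserved', 'nope')  -> {'tag:reserved': 'nope'}
+#   generate_tag_dict(module, None, None)          -> None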
+
+
+def main():
+ argument_spec = dict(
+ device_id=dict(required=False, aliases=['instance_id']),
+ public_ip=dict(required=False, aliases=['ip']),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ in_vpc=dict(required=False, type='bool', default=False),
+ reuse_existing_ip_allowed=dict(required=False, type='bool',
+ default=False),
+ release_on_disassociation=dict(required=False, type='bool', default=False),
+ allow_reassociation=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ private_ip_address=dict(),
+ tag_name=dict(),
+ tag_value=dict(),
+ public_ipv4_pool=dict()
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_by={
+ 'private_ip_address': ['device_id'],
+ },
+ )
+
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ device_id = module.params.get('device_id')
+ instance_id = module.params.get('instance_id')
+ public_ip = module.params.get('public_ip')
+ private_ip_address = module.params.get('private_ip_address')
+ state = module.params.get('state')
+ in_vpc = module.params.get('in_vpc')
+ domain = 'vpc' if in_vpc else None
+ reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
+ release_on_disassociation = module.params.get('release_on_disassociation')
+ allow_reassociation = module.params.get('allow_reassociation')
+ tag_name = module.params.get('tag_name')
+ tag_value = module.params.get('tag_value')
+ public_ipv4_pool = module.params.get('public_ipv4_pool')
+
+ if instance_id:
+ warnings = ["instance_id is no longer used, please use device_id going forward"]
+ is_instance = True
+ device_id = instance_id
+ else:
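+        # Infer the device type from its ID prefix: 'i-' is an EC2 instance,
+        # 'eni-' is an Elastic Network Interface (which requires in_vpc=true).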
+ if device_id and device_id.startswith('i-'):
+ is_instance = True
+ elif device_id:
+ if device_id.startswith('eni-') and not in_vpc:
+ module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
+ is_instance = False
+
+ tag_dict = generate_tag_dict(module, tag_name, tag_value)
+
+ try:
+ if device_id:
+ address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
+ else:
+ address = find_address(ec2, module, public_ip, None)
+
+ if state == 'present':
+ if device_id:
+ result = ensure_present(
+ ec2, module, domain, address, private_ip_address, device_id,
+ reuse_existing_ip_allowed, allow_reassociation,
+ module.check_mode, is_instance=is_instance
+ )
+ else:
+ if address:
+ changed = False
+ else:
+ address, changed = allocate_address(
+ ec2, module, domain, reuse_existing_ip_allowed,
+ module.check_mode, tag_dict, public_ipv4_pool
+ )
+ result = {
+ 'changed': changed,
+ 'public_ip': address['PublicIp'],
+ 'allocation_id': address['AllocationId']
+ }
+ else:
+ if device_id:
+ disassociated = ensure_absent(
+ ec2, module, address, device_id, module.check_mode, is_instance=is_instance
+ )
+
+ if release_on_disassociation and disassociated['changed']:
+ released = release_address(ec2, module, address, module.check_mode)
+ result = {
+ 'changed': True,
+ 'disassociated': disassociated,
+ 'released': released
+ }
+ else:
+ result = {
+ 'changed': disassociated['changed'],
+ 'disassociated': disassociated,
+ 'released': {'changed': False}
+ }
+ else:
+ released = release_address(ec2, module, address, module.check_mode)
+ result = {
+ 'changed': released['changed'],
+ 'disassociated': {'changed': False},
+ 'released': released
+ }
+
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e)
+
+ if instance_id:
+ result['warnings'] = warnings
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_facts.py
new file mode 100644
index 00000000..553930db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_facts.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip_info
+version_added: 1.0.0
+short_description: List EC2 EIP details
+description:
+ - List details of EC2 Elastic IP addresses.
+ - This module was called C(ec2_eip_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Macpherson (@iiibrad)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and filter
+ value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
+ for possible filters. Filter names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all EIP addresses in the current region.
+ community.aws.ec2_eip_info:
+ register: regional_eip_addresses
+
+- name: List all EIP addresses for a VM.
+ community.aws.ec2_eip_info:
+ filters:
+ instance-id: i-123456789
+ register: my_vm_eips
+
+- ansible.builtin.debug:
+ msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}"
+
+- name: List all EIP addresses for several VMs.
+ community.aws.ec2_eip_info:
+ filters:
+ instance-id:
+ - i-123456789
+ - i-987654321
+ register: my_vms_eips
+
+- name: List all EIP addresses using the 'Name' tag as a filter.
+ community.aws.ec2_eip_info:
+ filters:
+ tag:Name: www.example.com
+ register: my_vms_eips
+
+- name: List all EIP addresses using the Allocation-id as a filter
+ community.aws.ec2_eip_info:
+ filters:
+ allocation-id: eipalloc-64de1b01
+ register: my_vms_eips
+
+# Set the variable eip_alloc to the value of the first allocation_id
+# and set the variable my_pub_ip to the value of the first public_ip
+- ansible.builtin.set_fact:
+    eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
+    my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
+
+'''
+
+
+RETURN = '''
+addresses:
+ description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
+ returned: on success
+ type: list
+ sample: [{
+ "allocation_id": "eipalloc-64de1b01",
+ "association_id": "eipassoc-0fe9ce90d6e983e97",
+ "domain": "vpc",
+ "instance_id": "i-01020cfeb25b0c84f",
+ "network_interface_id": "eni-02fdeadfd4beef9323b",
+ "network_interface_owner_id": "0123456789",
+ "private_ip_address": "10.0.0.1",
+ "public_ip": "54.81.104.1",
+ "tags": {
+ "Name": "test-vm-54.81.104.1"
+ }
+ }]
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+
+def get_eips_details(module):
+ connection = module.client('ec2')
+ filters = module.params.get("filters")
+ try:
+ response = connection.describe_addresses(
+ Filters=ansible_dict_to_boto3_filter_list(filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Error retrieving EIPs")
+
+ addresses = camel_dict_to_snake_dict(response)['addresses']
+ for address in addresses:
+ if 'tags' in address:
+ address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
+ return addresses
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ filters=dict(type='dict', default={})
+ ),
+ supports_check_mode=True
+ )
+ if module._module._name == 'ec2_eip_facts':
+ module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", date='2021-12-01', collection_name='community.aws')
+
+ module.exit_json(changed=False, addresses=get_eips_details(module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_info.py
new file mode 100644
index 00000000..553930db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_eip_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip_info
+version_added: 1.0.0
+short_description: List EC2 EIP details
+description:
+ - List details of EC2 Elastic IP addresses.
+ - This module was called C(ec2_eip_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Macpherson (@iiibrad)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and filter
+ value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
+ for possible filters. Filter names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all EIP addresses in the current region.
+ community.aws.ec2_eip_info:
+ register: regional_eip_addresses
+
+- name: List all EIP addresses for a VM.
+ community.aws.ec2_eip_info:
+ filters:
+ instance-id: i-123456789
+ register: my_vm_eips
+
+- ansible.builtin.debug:
+ msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}"
+
+- name: List all EIP addresses for several VMs.
+ community.aws.ec2_eip_info:
+ filters:
+ instance-id:
+ - i-123456789
+ - i-987654321
+ register: my_vms_eips
+
+- name: List all EIP addresses using the 'Name' tag as a filter.
+ community.aws.ec2_eip_info:
+ filters:
+ tag:Name: www.example.com
+ register: my_vms_eips
+
+- name: List all EIP addresses using the allocation-id as a filter.
+ community.aws.ec2_eip_info:
+ filters:
+ allocation-id: eipalloc-64de1b01
+ register: my_vms_eips
+
+# Set the variable eip_alloc to the value of the first allocation_id
+# and set the variable my_pub_ip to the value of the first public_ip
+- ansible.builtin.set_fact:
+ eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
+ my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
+
+'''
+
+
+RETURN = '''
+addresses:
+ description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
+ returned: on success
+ type: list
+ sample: [{
+ "allocation_id": "eipalloc-64de1b01",
+ "association_id": "eipassoc-0fe9ce90d6e983e97",
+ "domain": "vpc",
+ "instance_id": "i-01020cfeb25b0c84f",
+ "network_interface_id": "eni-02fdeadfd4beef9323b",
+ "network_interface_owner_id": "0123456789",
+ "private_ip_address": "10.0.0.1",
+ "public_ip": "54.81.104.1",
+ "tags": {
+ "Name": "test-vm-54.81.104.1"
+ }
+ }]
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+
+def get_eips_details(module):
+ connection = module.client('ec2')
+ filters = module.params.get("filters")
+ try:
+ response = connection.describe_addresses(
+ Filters=ansible_dict_to_boto3_filter_list(filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Error retrieving EIPs")
+
+ addresses = camel_dict_to_snake_dict(response)['addresses']
+ for address in addresses:
+ if 'tags' in address:
+ address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
+ return addresses
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ filters=dict(type='dict', default={})
+ ),
+ supports_check_mode=True
+ )
+ if module._module._name == 'ec2_eip_facts':
+ module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", date='2021-12-01', collection_name='community.aws')
+
+ module.exit_json(changed=False, addresses=get_eips_details(module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb.py
new file mode 100644
index 00000000..d9a6231f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_elb
+version_added: 1.0.0
+short_description: De-registers or registers instances from EC2 ELBs
+description:
+ - This module de-registers an AWS EC2 instance from, or registers it with,
+ the ELBs that it belongs to.
+ - Returns fact "ec2_elbs" which is a list of ELBs attached to the instance
+ if I(state=absent) is passed as an argument.
+ - Will be marked changed when called only if there are ELBs found to operate on.
+author: "John Jarvis (@jarv)"
+options:
+ state:
+ description:
+ - Register or deregister the instance.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ instance_id:
+ description:
+ - EC2 Instance ID
+ required: true
+ type: str
+ ec2_elbs:
+ description:
+ - List of ELB names, required for registration.
+ - The ec2_elbs fact should be used if there was a previous de-register.
+ type: list
+ elements: str
+ enable_availability_zone:
+ description:
+ - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
+ been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
+ type: bool
+ default: 'yes'
+ wait:
+ description:
+ - Wait for instance registration or deregistration to complete successfully before returning.
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
+ If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
+ default: 0
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r"""
+# basic pre_task and post_task example
+pre_tasks:
+ - name: Instance De-register
+ community.aws.ec2_elb:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ state: absent
+roles:
+ - myrole
+post_tasks:
+ - name: Instance Register
+ community.aws.ec2_elb:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ ec2_elbs: "{{ item }}"
+ state: present
+ loop: "{{ ec2_elbs }}"
+"""
+
+import time
+
+try:
+ import boto
+ import boto.ec2
+ import boto.ec2.autoscale
+ import boto.ec2.elb
+ from boto.regioninfo import RegionInfo
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+class ElbManager:
+ """Handles EC2 instance ELB registration and de-registration"""
+
+ def __init__(self, module, instance_id=None, ec2_elbs=None,
+ region=None, **aws_connect_params):
+ self.module = module
+ self.instance_id = instance_id
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.lbs = self._get_instance_lbs(ec2_elbs)
+ self.changed = False
+
+ def deregister(self, wait, timeout):
+ """De-register the instance from all ELBs and wait for the ELB
+ to report it out-of-service"""
+
+ for lb in self.lbs:
+ initial_state = self._get_instance_health(lb)
+ if initial_state is None:
+ # Instance isn't registered with this load
+ # balancer. Ignore it and try the next one.
+ continue
+
+ # The instance is not associated with any load balancer so nothing to do
+ if not self._get_instance_lbs():
+ return
+
+ lb.deregister_instances([self.instance_id])
+
+ # The ELB is changing state in some way. Either an instance that's
+ # InService is moving to OutOfService, or an instance that's
+ # already OutOfService is being deregistered.
+ self.changed = True
+
+ if wait:
+ self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
+
+ def register(self, wait, enable_availability_zone, timeout):
+ """Register the instance for all ELBs and wait for the ELB
+ to report the instance in-service"""
+ for lb in self.lbs:
+ initial_state = self._get_instance_health(lb)
+
+ if enable_availability_zone:
+ self._enable_availability_zone(lb)
+
+ lb.register_instances([self.instance_id])
+
+ if wait:
+ self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
+ else:
+ # We cannot assume no change was made if we don't wait
+ # to find out
+ self.changed = True
+
+ def exists(self, lbtest):
+ """ Verify that the named ELB actually exists """
+
+ found = False
+ for lb in self.lbs:
+ if lb.name == lbtest:
+ found = True
+ break
+ return found
+
+ def _enable_availability_zone(self, lb):
+ """Enable the current instance's availability zone in the provided lb.
+ Returns True if the zone was enabled or False if no change was made.
+ lb: load balancer"""
+ instance = self._get_instance()
+ if instance.placement in lb.availability_zones:
+ return False
+
+ lb.enable_zones(zones=instance.placement)
+
+ # If successful, the new zone will have been added to
+ # lb.availability_zones
+ return instance.placement in lb.availability_zones
+
+ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
+ """Wait for an ELB to change state
+ lb: load balancer
+ awaited_state : state to poll for (string)"""
+
+ wait_timeout = time.time() + timeout
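+ # wait_timeout is an absolute deadline; with timeout=0 it is already in
+ # the past, so when registering, the failure branch below fires on the
+ # first health result whose reason_code is "Instance".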
+ while True:
+ instance_state = self._get_instance_health(lb)
+
+ if not instance_state:
+ msg = ("The instance %s could not be put in service on %s."
+ " Reason: Invalid Instance")
+ self.module.fail_json(msg=msg % (self.instance_id, lb))
+
+ if instance_state.state == awaited_state:
+ # Check the current state against the initial state, and only set
+ # changed if they are different.
+ if (initial_state is None) or (instance_state.state != initial_state.state):
+ self.changed = True
+ break
+ elif self._is_instance_state_pending(instance_state):
+ # If it's pending, we'll skip further checks and continue waiting
+ pass
+ elif (awaited_state == 'InService'
+ and instance_state.reason_code == "Instance"
+ and time.time() >= wait_timeout):
+ # If the reason_code for the instance being out of service is
+ # "Instance" this indicates a failure state, e.g. the instance
+ # has failed a health check or the ELB does not have the
+ # instance's availability zone enabled. The exact reason why is
+ # described in InstanceState.description.
+ msg = ("The instance %s could not be put in service on %s."
+ " Reason: %s")
+ self.module.fail_json(msg=msg % (self.instance_id,
+ lb,
+ instance_state.description))
+ time.sleep(1)
+
+ def _is_instance_state_pending(self, instance_state):
+ """
+ Determines whether the instance_state is "pending", meaning there is
+ an operation under way to bring it in service.
+ """
+ # This is messy, because AWS provides no way to distinguish between
+ # an instance that is OutOfService because it's pending vs. OutOfService
+ # because it's failing health checks. So we're forced to analyze the
+ # description, which is likely to be brittle.
+ return (instance_state and 'pending' in instance_state.description)
+
+ def _get_instance_health(self, lb):
+ """
+ Check instance health, should return status object or None under
+ certain error conditions.
+ """
+ try:
+ status = lb.get_instance_health([self.instance_id])[0]
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstance':
+ return None
+ else:
+ raise
+ return status
+
+ def _get_instance_lbs(self, ec2_elbs=None):
+ """Returns a list of ELBs attached to self.instance_id
+ ec2_elbs: an optional list of elb names that will be used
+ for elb lookup instead of returning what elbs
+ are attached to self.instance_id"""
+
+ if not ec2_elbs:
+ ec2_elbs = self._get_auto_scaling_group_lbs()
+
+ try:
+ elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+
+ elbs = []
+ marker = None
+ while True:
+ try:
+ newelbs = elb.get_all_load_balancers(marker=marker)
+ marker = newelbs.next_marker
+ elbs.extend(newelbs)
+ if not marker:
+ break
+ except TypeError:
+ # Older versions of boto do not allow the marker param
+ elbs = elb.get_all_load_balancers()
+ break
+
+ if ec2_elbs:
+ lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
+ else:
+ lbs = []
+ for lb in elbs:
+ for info in lb.instances:
+ if self.instance_id == info.id:
+ lbs.append(lb)
+ return lbs
+
+ def _get_auto_scaling_group_lbs(self):
+ """Returns a list of ELBs associated with self.instance_id
+ indirectly through its auto scaling group membership"""
+
+ try:
+ asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+
+ asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
+ if len(asg_instances) > 1:
+ self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+ if not asg_instances:
+ asg_elbs = []
+ else:
+ asg_name = asg_instances[0].group_name
+
+ asgs = asg.get_all_groups([asg_name])
+ if len(asgs) != 1:
+ self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+ asg_elbs = asgs[0].load_balancers
+
+ return asg_elbs
+
+ def _get_instance(self):
+ """Returns a boto.ec2.InstanceObject for self.instance_id"""
+ try:
+ ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+ return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
+
+
+def main():
+ argument_spec = dict(
+ state={'required': True, 'choices': ['present', 'absent']},
+ instance_id={'required': True},
+ ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+ wait={'required': False, 'default': True, 'type': 'bool'},
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'},
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ ec2_elbs = module.params['ec2_elbs']
+ wait = module.params['wait']
+ enable_availability_zone = module.params['enable_availability_zone']
+ timeout = module.params['wait_timeout']
+
+ if module.params['state'] == 'present' and module.params['ec2_elbs'] is None:
+ module.fail_json(msg="ELBs are required for registration")
+
+ instance_id = module.params['instance_id']
+ elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
+
+ if ec2_elbs is not None:
+ for elb in ec2_elbs:
+ if not elb_man.exists(elb):
+ msg = "ELB %s does not exist" % elb
+ module.fail_json(msg=msg)
+
+ if not module.check_mode:
+ if module.params['state'] == 'present':
+ elb_man.register(wait, enable_availability_zone, timeout)
+ elif module.params['state'] == 'absent':
+ elb_man.deregister(wait, timeout)
+
+ ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
+ ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
+
+ module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_facts.py
new file mode 100644
index 00000000..b18e502d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_facts.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_elb_info
+version_added: 1.0.0
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather information about EC2 Elastic Load Balancers in AWS.
+ - This module was called C(ec2_elb_facts) before Ansible 2.9. The usage did not change.
+author:
+ - "Michael Schultz (@mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+ - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
+
+- name: Gather information about all ELBs
+ community.aws.ec2_elb_info:
+ register: elb_info
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+- name: Gather information about a particular ELB
+ community.aws.ec2_elb_info:
+ names: frontend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ elb_info.elbs.0.dns_name }}"
+
+- name: Gather information about a set of ELBs
+ community.aws.ec2_elb_info:
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+'''
+
+import traceback
+
+try:
+ import boto.ec2.elb
+ from boto.ec2.tag import Tag
+ from boto.exception import BotoServerError
+except ImportError:
+ pass # Handled by ec2.HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+class ElbInformation(object):
+ """Handles ELB information."""
+
+ def __init__(self, module, names, region, **aws_connect_params):
+
+ self.module = module
+ self.names = names
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.connection = self._get_elb_connection()
+
+ def _get_tags(self, elbname):
+ params = {'LoadBalancerNames.member.1': elbname}
+ elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
+ return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
+
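+ # AWSRetry.backoff(tries=5, delay=5, backoff=2.0) retries throttled AWS
+ # calls with exponential backoff, sleeping roughly 5, 10, 20 and 40
+ # seconds between the five attempts before the last error propagates.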
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def _get_elb_connection(self):
+ return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+
+ def _get_elb_listeners(self, listeners):
+ listener_list = []
+
+ for listener in listeners:
+ listener_dict = {
+ 'load_balancer_port': listener[0],
+ 'instance_port': listener[1],
+ 'protocol': listener[2],
+ 'instance_protocol': listener[3]
+ }
+
+ try:
+ ssl_certificate_id = listener[4]
+ except IndexError:
+ pass
+ else:
+ if ssl_certificate_id:
+ listener_dict['ssl_certificate_id'] = ssl_certificate_id
+
+ listener_list.append(listener_dict)
+
+ return listener_list
+
+ def _get_health_check(self, health_check):
+ protocol, port_path = health_check.target.split(':')
+ try:
+ port, path = port_path.split('/', 1)
+ path = '/{0}'.format(path)
+ except ValueError:
+ port = port_path
+ path = None
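+ # e.g. target "HTTP:80/index.html" -> protocol='HTTP', port='80',
+ # path='/index.html'; target "TCP:443" -> port='443', path=None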
+
+ health_check_dict = {
+ 'ping_protocol': protocol.lower(),
+ 'ping_port': int(port),
+ 'response_timeout': health_check.timeout,
+ 'interval': health_check.interval,
+ 'unhealthy_threshold': health_check.unhealthy_threshold,
+ 'healthy_threshold': health_check.healthy_threshold,
+ }
+
+ if path:
+ health_check_dict['ping_path'] = path
+ return health_check_dict
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def _get_elb_info(self, elb):
+ elb_info = {
+ 'name': elb.name,
+ 'zones': elb.availability_zones,
+ 'dns_name': elb.dns_name,
+ 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
+ 'hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'hosted_zone_id': elb.canonical_hosted_zone_name_id,
+ 'instances': [instance.id for instance in elb.instances],
+ 'listeners': self._get_elb_listeners(elb.listeners),
+ 'scheme': elb.scheme,
+ 'security_groups': elb.security_groups,
+ 'health_check': self._get_health_check(elb.health_check),
+ 'subnets': elb.subnets,
+ 'instances_inservice': [],
+ 'instances_inservice_count': 0,
+ 'instances_outofservice': [],
+ 'instances_outofservice_count': 0,
+ 'instances_inservice_percent': 0.0,
+ 'tags': self._get_tags(elb.name)
+ }
+
+ if elb.vpc_id:
+ elb_info['vpc_id'] = elb.vpc_id
+
+ if elb.instances:
+ instance_health = self.connection.describe_instance_health(elb.name)
+ elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
+ elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
+ elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
+ elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
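+ # e.g. 3 InService and 1 OutOfService instances give 75.0 below; the
+ # ZeroDivisionError guard keeps an ELB with no health results at 0.0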
+ try:
+ elb_info['instances_inservice_percent'] = (
+ float(elb_info['instances_inservice_count']) /
+ float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
+ ) * 100.
+ except ZeroDivisionError:
+ elb_info['instances_inservice_percent'] = 0.
+ return elb_info
+
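+ # Pages through get_all_load_balancers with the marker token: each page
+ # is filtered against self.names (or kept whole when no names were given)
+ # until the API returns no next_marker.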
+ def list_elbs(self):
+ elb_array, token = [], None
+ get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
+ while True:
+ all_elbs = get_elb_with_backoff(marker=token)
+ token = all_elbs.next_marker
+
+ if all_elbs:
+ if self.names:
+ for existing_lb in all_elbs:
+ if existing_lb.name in self.names:
+ elb_array.append(existing_lb)
+ else:
+ elb_array.extend(all_elbs)
+ else:
+ break
+
+ if token is None:
+ break
+
+ return list(map(self._get_elb_info, elb_array))
+
+
+def main():
+ argument_spec = dict(
+ names={'default': [], 'type': 'list', 'elements': 'str'}
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'ec2_elb_facts':
+ module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", date='2021-12-01', collection_name='community.aws')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ names = module.params['names']
+ elb_information = ElbInformation(
+ module, names, region, **aws_connect_params)
+
+ ec2_info_result = dict(changed=False,
+ elbs=elb_information.list_elbs())
+
+ except BotoServerError as err:
+ module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
+ exception=traceback.format_exc())
+
+ module.exit_json(**ec2_info_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_info.py
new file mode 100644
index 00000000..b18e502d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_elb_info.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_elb_info
+version_added: 1.0.0
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather information about EC2 Elastic Load Balancers in AWS.
+ - This module was called C(ec2_elb_facts) before Ansible 2.9. The usage did not change.
+author:
+ - "Michael Schultz (@mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+ - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
+
+- name: Gather information about all ELBs
+ community.aws.ec2_elb_info:
+ register: elb_info
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+- name: Gather information about a particular ELB
+ community.aws.ec2_elb_info:
+ names: frontend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ elb_info.elbs.0.dns_name }}"
+
+- name: Gather information about a set of ELBs
+ community.aws.ec2_elb_info:
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+'''
+
+import traceback
+
+try:
+ import boto.ec2.elb
+ from boto.ec2.tag import Tag
+ from boto.exception import BotoServerError
+except ImportError:
+ pass # Handled by ec2.HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+class ElbInformation(object):
+ """Handles ELB information."""
+
+ def __init__(self, module, names, region, **aws_connect_params):
+
+ self.module = module
+ self.names = names
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.connection = self._get_elb_connection()
+
+ def _get_tags(self, elbname):
+ params = {'LoadBalancerNames.member.1': elbname}
+ elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
+ return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def _get_elb_connection(self):
+ return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+
+ def _get_elb_listeners(self, listeners):
+ listener_list = []
+
+ for listener in listeners:
+ listener_dict = {
+ 'load_balancer_port': listener[0],
+ 'instance_port': listener[1],
+ 'protocol': listener[2],
+ 'instance_protocol': listener[3]
+ }
+
+ try:
+ ssl_certificate_id = listener[4]
+ except IndexError:
+ pass
+ else:
+ if ssl_certificate_id:
+ listener_dict['ssl_certificate_id'] = ssl_certificate_id
+
+ listener_list.append(listener_dict)
+
+ return listener_list
+
+ def _get_health_check(self, health_check):
+ protocol, port_path = health_check.target.split(':')
+ try:
+ port, path = port_path.split('/', 1)
+ path = '/{0}'.format(path)
+ except ValueError:
+ port = port_path
+ path = None
+
+ health_check_dict = {
+ 'ping_protocol': protocol.lower(),
+ 'ping_port': int(port),
+ 'response_timeout': health_check.timeout,
+ 'interval': health_check.interval,
+ 'unhealthy_threshold': health_check.unhealthy_threshold,
+ 'healthy_threshold': health_check.healthy_threshold,
+ }
+
+ if path:
+ health_check_dict['ping_path'] = path
+ return health_check_dict
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def _get_elb_info(self, elb):
+ elb_info = {
+ 'name': elb.name,
+ 'zones': elb.availability_zones,
+ 'dns_name': elb.dns_name,
+ 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
+ 'hosted_zone_name': elb.canonical_hosted_zone_name,
+ 'hosted_zone_id': elb.canonical_hosted_zone_name_id,
+ 'instances': [instance.id for instance in elb.instances],
+ 'listeners': self._get_elb_listeners(elb.listeners),
+ 'scheme': elb.scheme,
+ 'security_groups': elb.security_groups,
+ 'health_check': self._get_health_check(elb.health_check),
+ 'subnets': elb.subnets,
+ 'instances_inservice': [],
+ 'instances_inservice_count': 0,
+ 'instances_outofservice': [],
+ 'instances_outofservice_count': 0,
+ 'instances_inservice_percent': 0.0,
+ 'tags': self._get_tags(elb.name)
+ }
+
+ if elb.vpc_id:
+ elb_info['vpc_id'] = elb.vpc_id
+
+ if elb.instances:
+ instance_health = self.connection.describe_instance_health(elb.name)
+ elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
+ elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
+ elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
+ elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
+ try:
+ elb_info['instances_inservice_percent'] = (
+ float(elb_info['instances_inservice_count']) /
+ float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
+ ) * 100.
+ except ZeroDivisionError:
+ elb_info['instances_inservice_percent'] = 0.
+ return elb_info
+
+ def list_elbs(self):
+ elb_array, token = [], None
+ get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
+ while True:
+ all_elbs = get_elb_with_backoff(marker=token)
+ token = all_elbs.next_marker
+
+ if all_elbs:
+ if self.names:
+ for existing_lb in all_elbs:
+ if existing_lb.name in self.names:
+ elb_array.append(existing_lb)
+ else:
+ elb_array.extend(all_elbs)
+ else:
+ break
+
+ if token is None:
+ break
+
+ return list(map(self._get_elb_info, elb_array))
+
+
+def main():
+ argument_spec = dict(
+ names={'default': [], 'type': 'list', 'elements': 'str'}
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'ec2_elb_facts':
+ module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", date='2021-12-01', collection_name='community.aws')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ names = module.params['names']
+ elb_information = ElbInformation(
+ module, names, region, **aws_connect_params)
+
+ ec2_info_result = dict(changed=False,
+ elbs=elb_information.list_elbs())
+
+ except BotoServerError as err:
+ module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
+ exception=traceback.format_exc())
+
+ module.exit_json(**ec2_info_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance.py
new file mode 100644
index 00000000..aba7ac26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance.py
@@ -0,0 +1,1840 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance
+version_added: 1.0.0
+short_description: Create & manage EC2 instances
+description:
+ - Create and manage AWS EC2 instances.
+ - >
+ Note: This module does not support creating
+ L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(amazon.aws.ec2) module
+ can create and manage spot instances.
+author:
+ - Ryan Scott Brown (@ryansb)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ type: list
+ elements: str
+ state:
+ description:
+ - Goal state for the instances.
+ choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
+ default: present
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the desired state (use wait_timeout to customize this).
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the instance to finish booting/terminating.
+ default: 600
+ type: int
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ Only required when the instance is not already present.
+ default: t2.micro
+ type: str
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the ec2 instance
+ type: str
+ tower_callback:
+ description:
+ - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
+ - Mutually exclusive with I(user_data).
+ - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
+ - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
+ type: dict
+ suboptions:
+ tower_address:
+ description:
+ - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
+ type: str
+ job_template_id:
+ description:
+ - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+).
+ type: str
+ host_config_key:
+ description:
+ - Host configuration secret key generated by the Tower job template.
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+ This means you have to specify all the desired tags on each task affecting an instance.
+ default: false
+ type: bool
+ image:
+ description:
+ - An image to use for the instance. The M(amazon.aws.ec2_ami_info) module may be used to retrieve images.
+ One of I(image) or I(image_id) are required when instance is not already present.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - The AMI ID.
+ type: str
+ ramdisk:
+ description:
+ - Overrides the AMI's default ramdisk ID.
+ type: str
+ kernel:
+ description:
+ - a string AKI to override the AMI kernel.
+ type: str
+ image_id:
+ description:
+ - I(ami) ID to use for the instance. One of I(image) or I(image_id) are required when instance is not already present.
+ - This is an alias for I(image.id).
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
+ type: list
+ elements: str
+ security_group:
+ description:
+ - A security group ID or name. Mutually exclusive with I(security_groups).
+ type: str
+ name:
+ description:
+ - The Name tag for the instance.
+ type: str
+ vpc_subnet_id:
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ If none is provided, M(community.aws.ec2_instance) will choose the default zone of the default VPC.
+ aliases: ['subnet_id']
+ type: str
+ network:
+ description:
+ - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
+ containing specifications for a single network interface.
+ - Use the M(amazon.aws.ec2_eni) module to create ENIs with special settings.
+ type: dict
+ suboptions:
+ interfaces:
+ description:
+ - a list of ENI IDs (strings) or a list of objects containing the key I(id).
+ type: list
+ assign_public_ip:
+ description:
+ - when true assigns a public IP address to the interface
+ type: bool
+ private_ip_address:
+ description:
+ - an IPv4 address to assign to the interface
+ type: str
+ ipv6_addresses:
+ description:
+ - a list of IPv6 addresses to assign to the network interface
+ type: list
+ source_dest_check:
+ description:
+ - controls whether source/destination checking is enabled on the interface
+ type: bool
+ description:
+ description:
+ - a description for the network interface
+ type: str
+ private_ip_addresses:
+ description:
+ - a list of IPv4 addresses to assign to the network interface
+ type: list
+ subnet_id:
+ description:
+ - the subnet to connect the network interface to
+ type: str
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is
+ terminated.
+ type: bool
+ device_index:
+ description:
+ - The index of the interface to modify
+ type: int
+ groups:
+ description:
+ - a list of security group IDs to attach to the interface
+ type: list
+ volumes:
+ description:
+ - A list of block device mappings, by default this will always use the AMI root device, so the volumes option is primarily for adding more storage.
+ - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
+ ebs.iops, and ebs.delete_on_termination.
+ - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ type: list
+ elements: dict
+ launch_template:
+ description:
+ - The EC2 launch template to base instance configuration on.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - the ID of the launch template (optional if name is specified).
+ type: str
+ name:
+ description:
+ - the pretty name of the launch template (optional if id is specified).
+ type: str
+ version:
+ description:
+ - the specific version of the launch template to use. If unspecified, the template default is chosen.
+ type: str
+ key_name:
+ description:
+ - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
+ type: str
+ availability_zone:
+ description:
+ - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
+ - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
+ type: str
+ instance_initiated_shutdown_behavior:
+ description:
+ - Whether to stop or terminate an instance upon shutdown.
+ choices: ['stop', 'terminate']
+ type: str
+ tenancy:
+ description:
+ - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ choices: ['dedicated', 'default']
+ type: str
+ termination_protection:
+ description:
+ - Whether to enable termination protection.
+ This module will not terminate an instance with termination protection active, it must be turned off first.
+ type: bool
+ cpu_credit_specification:
+ description:
+ - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
+ - Choose I(unlimited) to enable buying additional CPU credits.
+ choices: ['unlimited', 'standard']
+ type: str
+ cpu_options:
+ description:
+ - Reduce the number of vCPU exposed to the instance.
+ - Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
+ - Requires botocore >= 1.10.16
+ type: dict
+ suboptions:
+ threads_per_core:
+ description:
+ - Select the number of threads per core to enable. Disable or Enable Intel HT.
+ choices: [1, 2]
+ required: true
+ type: int
+ core_count:
+ description:
+ - Set the number of core to enable.
+ required: true
+ type: int
+ detailed_monitoring:
+ description:
+ - Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting.
+ type: bool
+ ebs_optimized:
+ description:
+ - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ type: bool
+ filters:
+ description:
+ - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
+ consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
+ for possible filters. Filter names and values are case sensitive.
+ - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
+ subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
+ type: dict
+ instance_role:
+ description:
+ - The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in ARN format
+ then the ListInstanceProfiles permission must also be granted.
+ U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided,
+ the role with a matching name will be used from the active AWS account.
+ type: str
+ placement_group:
+ description:
+ - The placement group that needs to be assigned to the instance
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Terminate every running instance in a region. Use with EXTREME caution.
+ community.aws.ec2_instance:
+ state: absent
+ filters:
+ instance-state-name: running
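+
+# The filters option can also scope which existing instances a task matches;
+# the tag and subnet values in this sketch are placeholders.
+- name: manage only the instance matching a Name tag and subnet
+ community.aws.ec2_instance:
+ name: "database-server"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t2.micro
+ image_id: ami-123456
+ filters:
+ tag:Name: "database-server"
+ subnet-id: subnet-5ca1ab1e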
+
+- name: restart a particular instance by its ID
+ community.aws.ec2_instance:
+ state: restarted
+ instance_ids:
+ - i-12345678
+
+- name: start an instance with a public IP address
+ community.aws.ec2_instance:
+ name: "public-compute-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: c5.large
+ security_group: default
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ tags:
+ Environment: Testing
+
+- name: start an instance and Add EBS
+ community.aws.ec2_instance:
+ name: "public-withebs-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t2.micro
+ key_name: "prod-ssh-key"
+ security_group: default
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ volume_size: 16
+ delete_on_termination: true
+
+- name: start an instance with a cpu_options
+ community.aws.ec2_instance:
+ name: "public-cpuoption-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ tags:
+ Environment: Testing
+ instance_type: c4.large
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+
+- name: start an instance and have it begin a Tower callback on boot
+ community.aws.ec2_instance:
+ name: "tower-callback-test"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ security_group: default
+ tower_callback:
+ # IP or hostname of tower server
+ tower_address: 1.2.3.4
+ job_template_id: 876
+ host_config_key: '[secret config key goes here]'
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ cpu_credit_specification: unlimited
+ tags:
+ SomeThing: "A value"
+
+- name: start an instance with ENI (An existing ENI ID is required)
+ community.aws.ec2_instance:
+ name: "public-eni-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ network:
+ interfaces:
+ - id: "eni-12345"
+ tags:
+ Env: "eni_on"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ instance_type: t2.micro
+ image_id: ami-123456
+
+- name: add second ENI interface
+ community.aws.ec2_instance:
+ name: "public-eni-instance"
+ network:
+ interfaces:
+ - id: "eni-12345"
+ - id: "eni-67890"
+ image_id: ami-123456
+ tags:
+ Env: "eni_on"
+ instance_type: t2.micro
+'''
+
+RETURN = '''
+instances:
+ description: A list of EC2 instances.
+ returned: when wait == true
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume
+ returned: always
+ type: str
+ sample: vol-12345678
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IPv4 associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ network.source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+    type: str
+ sample: vpc-0011223344
+'''
+
+import re
+import uuid
+import string
+import textwrap
+import time
+from collections import namedtuple
+
+try:
+ import boto3
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+
+module = None
+
+
+def tower_callback_script(tower_conf, windows=False, passwd=None):
+ script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+ if windows and passwd is not None:
+ script_tpl = """<powershell>
+ $admin = [adsi]("WinNT://./administrator, user")
+ $admin.PSBase.Invoke("SetPassword", "{PASS}")
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+ </powershell>
+ """
+ return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
+ elif windows and passwd is None:
+ script_tpl = """<powershell>
+ $admin = [adsi]("WinNT://./administrator, user")
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+ </powershell>
+ """
+        return to_native(textwrap.dedent(script_tpl).format(SCRIPT=script_url))
+ elif not windows:
+ for p in ['tower_address', 'job_template_id', 'host_config_key']:
+ if p not in tower_conf:
+ module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))
+
+ if isinstance(tower_conf['job_template_id'], string_types):
+ tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
+ tpl = string.Template(textwrap.dedent("""#!/bin/bash
+ set -x
+
+ retry_attempts=10
+ attempt=0
+ while [[ $attempt -lt $retry_attempts ]]
+ do
+ status_code=`curl --max-time 10 -v -k -s -i \
+ --data "host_config_key=${host_config_key}" \
+ 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}'`
+ if [[ $status_code == 404 ]]
+ then
+ status_code=`curl --max-time 10 -v -k -s -i \
+ --data "host_config_key=${host_config_key}" \
+ 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}'`
+ # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
+ fi
+ if [[ $status_code == 201 ]]
+ then
+ exit 0
+ fi
+ attempt=$(( attempt + 1 ))
+ echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
+ sleep 60
+ done
+ exit 1
+ """))
+ return tpl.safe_substitute(tower_address=tower_conf['tower_address'],
+ template_id=tower_conf['job_template_id'],
+ host_config_key=tower_conf['host_config_key'])
+ raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.")
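+
+# Illustrative sketch (hypothetical values): for a non-Windows host with
+#   tower_conf = {'tower_address': 'tower.example.com',
+#                 'job_template_id': 42,
+#                 'host_config_key': 'abc123'}
+# the returned user-data is a bash script that POSTs host_config_key=abc123 to
+# https://tower.example.com/api/v2/job_templates/42/callback/, falls back to the
+# v1 API on a 404, and retries up to ten times at one-minute intervals.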
+
+
+@AWSRetry.jittered_backoff()
+def manage_tags(match, new_tags, purge_tags, ec2):
+ changed = False
+ old_tags = boto3_tag_list_to_ansible_dict(match['Tags'])
+ tags_to_set, tags_to_delete = compare_aws_tags(
+ old_tags, new_tags,
+ purge_tags=purge_tags,
+ )
+ if module.check_mode:
+ return bool(tags_to_delete or tags_to_set)
+ if tags_to_set:
+ ec2.create_tags(
+ Resources=[match['InstanceId']],
+ Tags=ansible_dict_to_boto3_tag_list(tags_to_set))
+ changed |= True
+ if tags_to_delete:
+ delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)
+ ec2.delete_tags(
+ Resources=[match['InstanceId']],
+ Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))
+ changed |= True
+ return changed
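+
+# Illustrative sketch (hypothetical values): with the instance carrying
+# {'Name': 'web', 'Env': 'dev'} and new_tags={'Name': 'web', 'Env': 'prod'},
+# compare_aws_tags() returns tags_to_set={'Env': 'prod'} and tags_to_delete=[],
+# so only create_tags() is called; a key present on the instance but absent from
+# new_tags is deleted only when purge_tags=True.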
+
+
+def build_volume_spec(params):
+ volumes = params.get('volumes') or []
+ for volume in volumes:
+ if 'ebs' in volume:
+ for int_value in ['volume_size', 'iops']:
+ if int_value in volume['ebs']:
+ volume['ebs'][int_value] = int(volume['ebs'][int_value])
+ return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
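+
+# Illustrative sketch (hypothetical values): a volume entry such as
+#   {'device_name': '/dev/sda1', 'ebs': {'volume_size': '10', 'delete_on_termination': True}}
+# has volume_size coerced to the int 10 and is camelised to
+#   {'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 10, 'DeleteOnTermination': True}},
+# the shape RunInstances expects for BlockDeviceMappings.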
+
+
+def add_or_update_instance_profile(instance, desired_profile_name):
+ instance_profile_setting = instance.get('IamInstanceProfile')
+ if instance_profile_setting and desired_profile_name:
+ if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
+ # great, the profile we asked for is what's there
+ return False
+ else:
+ desired_arn = determine_iam_role(desired_profile_name)
+ if instance_profile_setting.get('Arn') == desired_arn:
+ return False
+ # update association
+ ec2 = module.client('ec2')
+ try:
+ association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
+ except botocore.exceptions.ClientError as e:
+ # check for InvalidAssociationID.NotFound
+ module.fail_json_aws(e, "Could not find instance profile association")
+ try:
+ resp = ec2.replace_iam_instance_profile_association(
+ AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
+ )
+ return True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, "Could not associate instance profile")
+
+ if not instance_profile_setting and desired_profile_name:
+ # create association
+ ec2 = module.client('ec2')
+ try:
+ resp = ec2.associate_iam_instance_profile(
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
+ InstanceId=instance['InstanceId']
+ )
+ return True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, "Could not associate new instance profile")
+
+ return False
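+
+# Illustrative summary: this returns False when the attached profile already
+# matches desired_profile_name (by name, ARN, or resolved ARN), replaces the
+# existing association when it does not, and creates a fresh association when
+# the instance has no profile at all; only the latter two paths return True.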
+
+
+def build_network_spec(params, ec2=None):
+ """
+ Returns list of interfaces [complex]
+ Interface type: {
+ 'AssociatePublicIpAddress': True|False,
+ 'DeleteOnTermination': True|False,
+ 'Description': 'string',
+ 'DeviceIndex': 123,
+ 'Groups': [
+ 'string',
+ ],
+ 'Ipv6AddressCount': 123,
+ 'Ipv6Addresses': [
+ {
+ 'Ipv6Address': 'string'
+ },
+ ],
+ 'NetworkInterfaceId': 'string',
+ 'PrivateIpAddress': 'string',
+ 'PrivateIpAddresses': [
+ {
+ 'Primary': True|False,
+ 'PrivateIpAddress': 'string'
+ },
+ ],
+ 'SecondaryPrivateIpAddressCount': 123,
+ 'SubnetId': 'string'
+ },
+ """
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ interfaces = []
+ network = params.get('network') or {}
+ if not network.get('interfaces'):
+ # they only specified one interface
+ spec = {
+ 'DeviceIndex': 0,
+ }
+ if network.get('assign_public_ip') is not None:
+ spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+ if params.get('vpc_subnet_id'):
+ spec['SubnetId'] = params['vpc_subnet_id']
+ else:
+ default_vpc = get_default_vpc(ec2)
+ if default_vpc is None:
+                module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+ else:
+ sub = get_default_subnet(ec2, default_vpc)
+ spec['SubnetId'] = sub['SubnetId']
+
+ if network.get('private_ip_address'):
+ spec['PrivateIpAddress'] = network['private_ip_address']
+
+ if params.get('security_group') or params.get('security_groups'):
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=spec['SubnetId'],
+ ec2=ec2
+ )
+ spec['Groups'] = [g['GroupId'] for g in groups]
+ if network.get('description') is not None:
+ spec['Description'] = network['description']
+ # TODO more special snowflake network things
+
+ return [spec]
+
+ # handle list of `network.interfaces` options
+ for idx, interface_params in enumerate(network.get('interfaces', [])):
+ spec = {
+ 'DeviceIndex': idx,
+ }
+
+ if isinstance(interface_params, string_types):
+ # naive case where user gave
+ # network_interfaces: [eni-1234, eni-4567, ....]
+ # put into normal data structure so we don't dupe code
+ interface_params = {'id': interface_params}
+
+ if interface_params.get('id') is not None:
+ # if an ID is provided, we don't want to set any other parameters.
+ spec['NetworkInterfaceId'] = interface_params['id']
+ interfaces.append(spec)
+ continue
+
+ spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+ if interface_params.get('ipv6_addresses'):
+ spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+ if interface_params.get('private_ip_address'):
+ spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+ if interface_params.get('description'):
+ spec['Description'] = interface_params.get('description')
+
+ if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+ spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+        elif not spec.get('SubnetId') and not interface_params.get('id'):
+ # TODO grab a subnet from default VPC
+ raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+ interfaces.append(spec)
+ return interfaces
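+
+# Illustrative sketch (hypothetical values): with no network.interfaces list,
+#   params = {'vpc_subnet_id': 'subnet-0123456',
+#             'network': {'assign_public_ip': True, 'private_ip_address': '10.0.0.10'}}
+# yields a single-interface spec:
+#   [{'DeviceIndex': 0, 'AssociatePublicIpAddress': True,
+#     'SubnetId': 'subnet-0123456', 'PrivateIpAddress': '10.0.0.10'}]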
+
+
+def warn_if_public_ip_assignment_changed(instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+ if assign_public_ip is None:
+ return
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = instance.get('PublicDnsName')
+ if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
+ module.warn(
+ "Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(
+ assign_public_ip, instance['InstanceId']))
+
+
+def warn_if_cpu_options_changed(instance):
+ # This is a non-modifiable attribute.
+ cpu_options = module.params.get('cpu_options')
+ if cpu_options is None:
+ return
+
+ # Check that the CpuOptions set are the same and warn if not
+ core_count_curr = instance['CpuOptions'].get('CoreCount')
+ core_count = cpu_options.get('core_count')
+ threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
+ threads_per_core = cpu_options.get('threads_per_core')
+ if core_count_curr != core_count:
+        module.warn(
+            "Unable to modify core_count from {0} to {1}. "
+            "Assigning a number of cores is determined during instance creation.".format(
+                core_count_curr, core_count))
+
+ if threads_per_core_curr != threads_per_core:
+ module.warn(
+ "Unable to modify threads_per_core from {0} to {1}. "
+ "Assigning a number of threads per core is determined during instance creation.".format(
+ threads_per_core_curr, threads_per_core))
+
+
+def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ if subnet_id is not None:
+ try:
+ sub = ec2.describe_subnets(SubnetIds=[subnet_id])
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidGroup.NotFound':
+                module.fail_json(
+                    msg="Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
+                        subnet_id
+                    )
+                )
+ module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+ parent_vpc_id = sub['Subnets'][0]['VpcId']
+
+ vpc = {
+ 'Name': 'vpc-id',
+ 'Values': [parent_vpc_id]
+ }
+
+ # because filter lists are AND in the security groups API,
+ # make two separate requests for groups by ID and by name
+ id_filters = [vpc]
+ name_filters = [vpc]
+
+ if group:
+ name_filters.append(
+ dict(
+ Name='group-name',
+ Values=[group]
+ )
+ )
+ if group.startswith('sg-'):
+ id_filters.append(
+ dict(
+ Name='group-id',
+ Values=[group]
+ )
+ )
+ if groups:
+ name_filters.append(
+ dict(
+ Name='group-name',
+ Values=groups
+ )
+ )
+ if [g for g in groups if g.startswith('sg-')]:
+ id_filters.append(
+ dict(
+ Name='group-id',
+ Values=[g for g in groups if g.startswith('sg-')]
+ )
+ )
+
+ found_groups = []
+ for f_set in (id_filters, name_filters):
+ if len(f_set) > 1:
+ found_groups.extend(ec2.get_paginator(
+ 'describe_security_groups'
+ ).paginate(
+ Filters=f_set
+ ).search('SecurityGroups[]'))
+ return list(dict((g['GroupId'], g) for g in found_groups).values())
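+
+# Illustrative note: DescribeSecurityGroups ANDs every filter in a request, so a
+# mixed input such as group='sg-0123456' plus groups=['web', 'db'] is split into
+# a group-id query and a group-name query scoped to the same vpc-id filter, and
+# the merged results are de-duplicated by GroupId.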
+
+
+def build_top_level_options(params):
+ spec = {}
+ if params.get('image_id'):
+ spec['ImageId'] = params['image_id']
+ elif isinstance(params.get('image'), dict):
+ image = params.get('image', {})
+ spec['ImageId'] = image.get('id')
+ if 'ramdisk' in image:
+ spec['RamdiskId'] = image['ramdisk']
+ if 'kernel' in image:
+ spec['KernelId'] = image['kernel']
+ if not spec.get('ImageId') and not params.get('launch_template'):
+ module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
+
+ if params.get('key_name') is not None:
+ spec['KeyName'] = params.get('key_name')
+ if params.get('user_data') is not None:
+ spec['UserData'] = to_native(params.get('user_data'))
+ elif params.get('tower_callback') is not None:
+ spec['UserData'] = tower_callback_script(
+ tower_conf=params.get('tower_callback'),
+ windows=params.get('tower_callback').get('windows', False),
+ passwd=params.get('tower_callback').get('set_password'),
+ )
+
+ if params.get('launch_template') is not None:
+ spec['LaunchTemplate'] = {}
+        if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
+ module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
+
+ if params.get('launch_template').get('id') is not None:
+ spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
+ if params.get('launch_template').get('name') is not None:
+ spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
+ if params.get('launch_template').get('version') is not None:
+ spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
+
+ if params.get('detailed_monitoring', False):
+ spec['Monitoring'] = {'Enabled': True}
+ if params.get('cpu_credit_specification') is not None:
+ spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
+ if params.get('tenancy') is not None:
+ spec['Placement'] = {'Tenancy': params.get('tenancy')}
+ if params.get('placement_group'):
+ if 'Placement' in spec:
+ spec['Placement']['GroupName'] = str(params.get('placement_group'))
+ else:
+ spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
+ if params.get('ebs_optimized') is not None:
+ spec['EbsOptimized'] = params.get('ebs_optimized')
+ if params.get('instance_initiated_shutdown_behavior'):
+ spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
+ if params.get('termination_protection') is not None:
+ spec['DisableApiTermination'] = params.get('termination_protection')
+ if params.get('cpu_options') is not None:
+ spec['CpuOptions'] = {}
+ spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
+ spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
+ return spec
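+
+# Illustrative sketch (hypothetical values):
+#   params = {'image_id': 'ami-0011223344', 'key_name': 'my-key',
+#             'tenancy': 'dedicated', 'placement_group': 'my-pg'}
+# yields
+#   {'ImageId': 'ami-0011223344', 'KeyName': 'my-key',
+#    'Placement': {'Tenancy': 'dedicated', 'GroupName': 'my-pg'}}
+# with tenancy and placement_group merged into one Placement dict.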
+
+
+def build_instance_tags(params, propagate_tags_to_volumes=True):
+ tags = params.get('tags', {})
+ if params.get('name') is not None:
+ if tags is None:
+ tags = {}
+ tags['Name'] = params.get('name')
+ return [
+ {
+ 'ResourceType': 'volume',
+ 'Tags': ansible_dict_to_boto3_tag_list(tags),
+ },
+ {
+ 'ResourceType': 'instance',
+ 'Tags': ansible_dict_to_boto3_tag_list(tags),
+ },
+ ]
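+
+# Illustrative sketch (hypothetical values): params={'name': 'web-1', 'tags': {'Env': 'dev'}}
+# produces two TagSpecifications entries, one for the volumes created at launch
+# and one for the instance itself, each carrying the boto3-style tag list for
+# {'Env': 'dev', 'Name': 'web-1'}.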
+
+
+def build_run_instance_spec(params, ec2=None):
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ spec = dict(
+ ClientToken=uuid.uuid4().hex,
+ MaxCount=1,
+ MinCount=1,
+ )
+ # network parameters
+ spec['NetworkInterfaces'] = build_network_spec(params, ec2)
+ spec['BlockDeviceMappings'] = build_volume_spec(params)
+ spec.update(**build_top_level_options(params))
+ spec['TagSpecifications'] = build_instance_tags(params)
+
+ # IAM profile
+ if params.get('instance_role'):
+ spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
+
+ spec['InstanceType'] = params['instance_type']
+ return spec
+
+
+def await_instances(ids, state='OK'):
+ if not module.params.get('wait', True):
+ # the user asked not to wait for anything
+ return
+
+ if module.check_mode:
+ # In check mode, there is no change even if you wait.
+ return
+
+ state_opts = {
+ 'OK': 'instance_status_ok',
+ 'STOPPED': 'instance_stopped',
+ 'TERMINATED': 'instance_terminated',
+ 'EXISTS': 'instance_exists',
+ 'RUNNING': 'instance_running',
+ }
+ if state not in state_opts:
+ module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))
+ waiter = module.client('ec2').get_waiter(state_opts[state])
+ try:
+ waiter.wait(
+ InstanceIds=ids,
+ WaiterConfig={
+ 'Delay': 15,
+ 'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
+ }
+ )
+ except botocore.exceptions.WaiterConfigError as e:
+ module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
+ to_native(e), ', '.join(ids), state))
+ except botocore.exceptions.WaiterError as e:
+ module.warn("Instances {0} took too long to reach state {1}. {2}".format(
+ ', '.join(ids), state, to_native(e)))
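+
+# Illustrative note: with the default wait_timeout of 600 seconds and the fixed
+# 15-second polling delay, the waiter makes 600 // 15 = 40 attempts; a timeout
+# (WaiterError) only emits a warning, while a WaiterConfigError fails the module
+# outright.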
+
+
+def diff_instance_and_params(instance, params, ec2=None, skip=None):
+ """boto3 instance obj, module params"""
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ if skip is None:
+ skip = []
+
+ changes_to_apply = []
+ id_ = instance['InstanceId']
+
+ ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])
+
+ def value_wrapper(v):
+ return {'Value': v}
+
+ param_mappings = [
+ ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
+ ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
+ # user data is an immutable property
+ # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
+ ]
+
+ for mapping in param_mappings:
+ if params.get(mapping.param_key) is None:
+ continue
+ if mapping.instance_key in skip:
+ continue
+
+ value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_)
+ if value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
+ arguments = dict(
+ InstanceId=instance['InstanceId'],
+ # Attribute=mapping.attribute_name,
+ )
+ arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
+ changes_to_apply.append(arguments)
+
+ if params.get('security_group') or params.get('security_groups'):
+ value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute="groupSet", InstanceId=id_)
+ # managing security groups
+ if params.get('vpc_subnet_id'):
+ subnet_id = params.get('vpc_subnet_id')
+ else:
+ default_vpc = get_default_vpc(ec2)
+ if default_vpc is None:
+ module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.")
+ else:
+ sub = get_default_subnet(ec2, default_vpc)
+ subnet_id = sub['SubnetId']
+
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=subnet_id,
+ ec2=ec2
+ )
+ expected_groups = [g['GroupId'] for g in groups]
+ instance_groups = [g['GroupId'] for g in value['Groups']]
+ if set(instance_groups) != set(expected_groups):
+ changes_to_apply.append(dict(
+ Groups=expected_groups,
+ InstanceId=instance['InstanceId']
+ ))
+
+ if (params.get('network') or {}).get('source_dest_check') is not None:
+ # network.source_dest_check is nested, so needs to be treated separately
+ check = bool(params.get('network').get('source_dest_check'))
+ if instance['SourceDestCheck'] != check:
+ changes_to_apply.append(dict(
+ InstanceId=instance['InstanceId'],
+ SourceDestCheck={'Value': check},
+ ))
+
+ return changes_to_apply
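+
+# Illustrative sketch (hypothetical values): if the instance has termination
+# protection off but params['termination_protection'] is True, the returned list
+# contains {'InstanceId': 'i-012345678', 'DisableApiTermination': {'Value': True}};
+# each such dict is later passed straight to ec2.modify_instance_attribute(**change).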
+
+
+def change_network_attachments(instance, params, ec2):
+ if (params.get('network') or {}).get('interfaces') is not None:
+ new_ids = []
+ for inty in params.get('network').get('interfaces'):
+ if isinstance(inty, dict) and 'id' in inty:
+ new_ids.append(inty['id'])
+ elif isinstance(inty, string_types):
+ new_ids.append(inty)
+ # network.interfaces can create the need to attach new interfaces
+ old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']]
+ to_attach = set(new_ids) - set(old_ids)
+ for eni_id in to_attach:
+ ec2.attach_network_interface(
+ DeviceIndex=new_ids.index(eni_id),
+ InstanceId=instance['InstanceId'],
+ NetworkInterfaceId=eni_id,
+ )
+ return bool(len(to_attach))
+ return False
+
+
+def find_instances(ec2, ids=None, filters=None):
+ paginator = ec2.get_paginator('describe_instances')
+ if ids:
+ return list(paginator.paginate(
+ InstanceIds=ids,
+ ).search('Reservations[].Instances[]'))
+    elif filters is None:
+        module.fail_json(msg="No filters provided when they were required")
+    else:
+        for key in list(filters.keys()):
+            if not key.startswith("tag:"):
+                filters[key.replace("_", "-")] = filters.pop(key)
+        return list(paginator.paginate(
+            Filters=ansible_dict_to_boto3_filter_list(filters)
+        ).search('Reservations[].Instances[]'))
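+
+# Illustrative note: non-"tag:" filter keys have underscores rewritten to hyphens,
+# so a caller may pass {'instance_state_name': ['running']} and the request is
+# sent with the EC2 filter name 'instance-state-name'; keys like 'tag:Name' pass
+# through unchanged.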
+
+
+@AWSRetry.jittered_backoff()
+def get_default_vpc(ec2):
+ vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
+ if len(vpcs.get('Vpcs', [])):
+ return vpcs.get('Vpcs')[0]
+ return None
+
+
+@AWSRetry.jittered_backoff()
+def get_default_subnet(ec2, vpc, availability_zone=None):
+ subnets = ec2.describe_subnets(
+ Filters=ansible_dict_to_boto3_filter_list({
+ 'vpc-id': vpc['VpcId'],
+ 'state': 'available',
+ 'default-for-az': 'true',
+ })
+ )
+ if len(subnets.get('Subnets', [])):
+ if availability_zone is not None:
+ subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets'))
+ if availability_zone in subs_by_az:
+ return subs_by_az[availability_zone]
+
+ # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first
+ # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list
+ by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone'])
+ return by_az[0]
+ return None
+
+
+def ensure_instance_state(state, ec2=None):
+ if ec2 is None:
+        ec2 = module.client('ec2')
+ if state in ('running', 'started'):
+ changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to start instances: {0}".format(failure_reason),
+ reboot_success=list(changed),
+ reboot_failed=failed)
+
+ module.exit_json(
+ msg='Instances started',
+ reboot_success=list(changed),
+ changed=bool(len(changed)),
+ reboot_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('restarted', 'rebooted'):
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='STOPPED')
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='RUNNING')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to restart instances: {0}".format(failure_reason),
+ reboot_success=list(changed),
+ reboot_failed=failed)
+
+ module.exit_json(
+ msg='Instances restarted',
+ reboot_success=list(changed),
+ changed=bool(len(changed)),
+ reboot_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('stopped',):
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='STOPPED')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to stop instances: {0}".format(failure_reason),
+ stop_success=list(changed),
+ stop_failed=failed)
+
+ module.exit_json(
+ msg='Instances stopped',
+ stop_success=list(changed),
+ changed=bool(len(changed)),
+ stop_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('absent', 'terminated'):
+ terminated, terminate_failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='TERMINATED')
+
+ if terminate_failed:
+ module.fail_json(
+ msg="Unable to terminate instances: {0}".format(failure_reason),
+ terminate_success=list(terminated),
+ terminate_failed=terminate_failed)
+ module.exit_json(
+ msg='Instances terminated',
+ terminate_success=list(terminated),
+ changed=bool(len(terminated)),
+ terminate_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+
+
+@AWSRetry.jittered_backoff()
+def change_instance_state(filters, desired_state, ec2=None):
+ """Takes STOPPED/RUNNING/TERMINATED"""
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ changed = set()
+ instances = find_instances(ec2, filters=filters)
+ to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state)
+ unchanged = set()
+ failure_reason = ""
+
+ for inst in instances:
+ try:
+ if desired_state == 'TERMINATED':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
+ # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+ resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['TerminatingInstances']:
+                    changed.add(i['InstanceId'])
+ if desired_state == 'STOPPED':
+ if inst['State']['Name'] in ('stopping', 'stopped'):
+ unchanged.add(inst['InstanceId'])
+ continue
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['StoppingInstances']:
+                    changed.add(i['InstanceId'])
+ if desired_state == 'RUNNING':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['StartingInstances']:
+                    changed.add(i['InstanceId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ try:
+ failure_reason = to_native(e.message)
+ except AttributeError:
+ failure_reason = to_native(e)
+
+ if changed:
+ await_instances(ids=list(changed) + list(unchanged), state=desired_state)
+
+ change_failed = list(to_change - changed)
+
+ if instances:
+ instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances))
+ return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+ instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+ instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
+ return instance
+
+
+def determine_iam_role(name_or_arn):
+ if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+ return name_or_arn
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return role['InstanceProfile']['Arn']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
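+
+# Illustrative sketch (hypothetical values): an input already matching the
+# instance-profile ARN pattern, e.g. 'arn:aws:iam::123456789012:instance-profile/myprofile',
+# is returned unchanged, while a bare name such as 'myprofile' is resolved to its
+# ARN via a retried iam.get_instance_profile() lookup.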
+
+
+def handle_existing(existing_matches, changed, ec2, state):
+ if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
+ ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+ if failed:
+ module.fail_json(msg="Couldn't start instances: {0}. Failure reason: {1}".format(instances, failure_reason))
+ module.exit_json(
+ changed=bool(len(ins_changed)) or changed,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=[i['InstanceId'] for i in instances],
+ )
+ changes = diff_instance_and_params(existing_matches[0], module.params)
+ for c in changes:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ changed |= bool(changes)
+ changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
+ changed |= change_network_attachments(existing_matches[0], module.params, ec2)
+ altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches])
+ module.exit_json(
+ changed=bool(len(changes)) or changed,
+ instances=[pretty_instance(i) for i in altered],
+ instance_ids=[i['InstanceId'] for i in altered],
+ changes=changes,
+ )
+
+
+def ensure_present(existing_matches, changed, ec2, state):
+ if len(existing_matches):
+ try:
+ handle_existing(existing_matches, changed, ec2, state)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])),
+ # instances=[pretty_instance(i) for i in existing_matches],
+ # instance_ids=[i['InstanceId'] for i in existing_matches],
+ )
+ try:
+ instance_spec = build_run_instance_spec(module.params)
+        # If check mode is enabled, bail out before making any changes.
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ spec=instance_spec,
+ )
+ instance_response = run_instances(ec2, **instance_spec)
+ instances = instance_response['Instances']
+ instance_ids = [i['InstanceId'] for i in instances]
+
+ for ins in instances:
+ changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
+ for c in changes:
+ try:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
+
+ if not module.params.get('wait'):
+ module.exit_json(
+ changed=True,
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ await_instances(instance_ids)
+ instances = ec2.get_paginator('describe_instances').paginate(
+ InstanceIds=instance_ids
+ ).search('Reservations[].Instances[]')
+
+ module.exit_json(
+ changed=True,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create new EC2 instance")
+
+
+@AWSRetry.jittered_backoff()
+def run_instances(ec2, **instance_spec):
+ try:
+ return ec2.run_instances(**instance_spec)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']:
+            # If the instance profile has just been created, it can take some time to become
+            # visible to EC2, so we wait ten seconds and retry run_instances once.
+            time.sleep(10)
+            return ec2.run_instances(**instance_spec)
+        else:
+            raise
+
+
+def main():
+ global module
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
+ wait=dict(default=True, type='bool'),
+ wait_timeout=dict(default=600, type='int'),
+ # count=dict(default=1, type='int'),
+ image=dict(type='dict'),
+ image_id=dict(type='str'),
+ instance_type=dict(default='t2.micro', type='str'),
+ user_data=dict(type='str'),
+ tower_callback=dict(type='dict'),
+ ebs_optimized=dict(type='bool'),
+ vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
+ availability_zone=dict(type='str'),
+ security_groups=dict(default=[], type='list', elements='str'),
+ security_group=dict(type='str'),
+ instance_role=dict(type='str'),
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ filters=dict(type='dict', default=None),
+ launch_template=dict(type='dict'),
+ key_name=dict(type='str'),
+ cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
+ cpu_options=dict(type='dict', options=dict(
+ core_count=dict(type='int', required=True),
+ threads_per_core=dict(type='int', choices=[1, 2], required=True)
+ )),
+ tenancy=dict(type='str', choices=['dedicated', 'default']),
+ placement_group=dict(type='str'),
+ instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
+ termination_protection=dict(type='bool'),
+ detailed_monitoring=dict(type='bool'),
+ instance_ids=dict(default=[], type='list', elements='str'),
+ network=dict(default=None, type='dict'),
+ volumes=dict(default=None, type='list', elements='dict'),
+ )
+ # running/present are synonyms
+ # as are terminated/absent
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['security_groups', 'security_group'],
+ ['availability_zone', 'vpc_subnet_id'],
+ ['tower_callback', 'user_data'],
+ ['image_id', 'image'],
+ ],
+ supports_check_mode=True
+ )
+
+ if module.params.get('network'):
+ if module.params.get('network').get('interfaces'):
+ if module.params.get('security_group'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
+ if module.params.get('security_groups'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
+
+ state = module.params.get('state')
+ ec2 = module.client('ec2')
+ if module.params.get('filters') is None:
+ filters = {
+ # all states except shutting-down and terminated
+ 'instance-state-name': ['pending', 'running', 'stopping', 'stopped']
+ }
+ if state == 'stopped':
+ # only need to change instances that aren't already stopped
+ filters['instance-state-name'] = ['stopping', 'pending', 'running']
+
+ if isinstance(module.params.get('instance_ids'), string_types):
+ filters['instance-id'] = [module.params.get('instance_ids')]
+ elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
+ filters['instance-id'] = module.params.get('instance_ids')
+ else:
+ if not module.params.get('vpc_subnet_id'):
+ if module.params.get('network'):
+ # grab AZ from one of the ENIs
+ ints = module.params.get('network').get('interfaces')
+ if ints:
+ filters['network-interface.network-interface-id'] = []
+ for i in ints:
+ if isinstance(i, dict):
+ i = i['id']
+ filters['network-interface.network-interface-id'].append(i)
+ else:
+ sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone'))
+ filters['subnet-id'] = sub['SubnetId']
+ else:
+ filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
+
+ if module.params.get('name'):
+ filters['tag:Name'] = [module.params.get('name')]
+
+ if module.params.get('image_id'):
+ filters['image-id'] = [module.params.get('image_id')]
+ elif (module.params.get('image') or {}).get('id'):
+ filters['image-id'] = [module.params.get('image', {}).get('id')]
+
+ module.params['filters'] = filters
+
+ if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'):
+ module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16")
+
+ existing_matches = find_instances(ec2, filters=module.params.get('filters'))
+ changed = False
+
+ if state not in ('terminated', 'absent') and existing_matches:
+ for match in existing_matches:
+ warn_if_public_ip_assignment_changed(match)
+ warn_if_cpu_options_changed(match)
+ tags = module.params.get('tags') or {}
+ name = module.params.get('name')
+ if name:
+ tags['Name'] = name
+ changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2)
+
+ if state in ('present', 'running', 'started'):
+ ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state)
+ elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'):
+ if existing_matches:
+ ensure_instance_state(state, ec2)
+ else:
+ module.exit_json(
+ msg='No matching instances found',
+ changed=False,
+ instances=[],
+ )
+ else:
+ module.fail_json(msg="We don't handle the state {0}".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_facts.py
new file mode 100644
index 00000000..1c4c1f0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_facts.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance_info
+version_added: 1.0.0
+short_description: Gather information about EC2 instances in AWS
+description:
+  - Gather information about EC2 instances in AWS.
+ - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Michael Schuett (@michaeljs1990)
+ - Rob White (@wimnat)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ required: false
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all instances
+ community.aws.ec2_instance_info:
+
+- name: Gather information about all instances in AZ ap-southeast-2a
+ community.aws.ec2_instance_info:
+ filters:
+ availability-zone: ap-southeast-2a
+
+- name: Gather information about a particular instance using ID
+ community.aws.ec2_instance_info:
+ instance_ids:
+ - i-12345678
+
+- name: Gather information about any instance with a tag key Name and value Example
+ community.aws.ec2_instance_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any instance in states "shutting-down", "stopping", "stopped"
+ community.aws.ec2_instance_info:
+ filters:
+ instance-state-name: [ "shutting-down", "stopping", "stopped" ]
+
+'''
+
+RETURN = r'''
+instances:
+  description: A list of EC2 instances.
+ returned: always
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+      description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+            description: The ID of the EBS volume.
+ returned: always
+ type: str
+ sample: vol-12345678
+ cpu_options:
+ description: The CPU options set for the instance.
+ returned: always if botocore version >= 1.10.16
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ returned: always
+ type: int
+ sample: 1
+ threads_per_core:
+          description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+ returned: always
+ type: int
+ sample: 1
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+        description: The ID of the instance profile.
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+      description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+          description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+      description: The public IPv4 address assigned to the instance.
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+      description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+      type: str
+ sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_ec2_instances(connection, module):
+
+ instance_ids = module.params.get("instance_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ reservations_paginator = connection.get_paginator('describe_instances')
+ reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list ec2 instances")
+
+ # Get instances from reservations
+ instances = []
+ for reservation in reservations['Reservations']:
+ instances = instances + reservation['Instances']
+
+    # Turn the boto3 results into ansible-friendly snake_cased names
+ snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+    # Turn the boto3 tag lists into ansible-friendly tag dictionaries
+ for instance in snaked_instances:
+ instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+ module.exit_json(instances=snaked_instances)
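+
+# Illustrative sketch (hypothetical values): a task such as
+#   - community.aws.ec2_instance_info:
+#       filters:
+#         "tag:Name": Example
+# exits with output of the form
+#   {'instances': [{'instance_id': 'i-012345678', 'tags': {'Name': 'Example'}, ...}]}
+# where boto3 CamelCase keys are snake_cased and the Tags list is flattened to a dict.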
+
+
+def main():
+
+ argument_spec = dict(
+ instance_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['instance_ids', 'filters']
+ ],
+ supports_check_mode=True,
+ )
+ if module._name == 'ec2_instance_facts':
+ module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_info.py
new file mode 100644
index 00000000..1c4c1f0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_instance_info.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance_info
+version_added: 1.0.0
+short_description: Gather information about EC2 instances in AWS
+description:
+  - Gather information about EC2 instances in AWS.
+ - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Michael Schuett (@michaeljs1990)
+ - Rob White (@wimnat)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ required: false
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all instances
+ community.aws.ec2_instance_info:
+
+- name: Gather information about all instances in AZ ap-southeast-2a
+ community.aws.ec2_instance_info:
+ filters:
+ availability-zone: ap-southeast-2a
+
+- name: Gather information about a particular instance using ID
+ community.aws.ec2_instance_info:
+ instance_ids:
+ - i-12345678
+
+- name: Gather information about any instance with a tag key Name and value Example
+ community.aws.ec2_instance_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any instance in states "shutting-down", "stopping", "stopped"
+ community.aws.ec2_instance_info:
+ filters:
+ instance-state-name: [ "shutting-down", "stopping", "stopped" ]
+
+'''
+
+RETURN = r'''
+instances:
+  description: A list of EC2 instances.
+ returned: always
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+      description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+            description: The ID of the EBS volume.
+ returned: always
+ type: str
+ sample: vol-12345678
+ cpu_options:
+ description: The CPU options set for the instance.
+ returned: always if botocore version >= 1.10.16
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ returned: always
+ type: int
+ sample: 1
+ threads_per_core:
+ description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+ returned: always
+ type: int
+ sample: 1
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile.
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance.
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_ec2_instances(connection, module):
+
+ instance_ids = module.params.get("instance_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
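+    # As a sketch of what the helper above produces, a module filters dict such as
+    #   {"instance-state-name": ["running", "stopped"]}
+    # becomes the boto3-style filter list
+    #   [{"Name": "instance-state-name", "Values": ["running", "stopped"]}]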
+
+ try:
+ reservations_paginator = connection.get_paginator('describe_instances')
+ reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list ec2 instances")
+
+ # Get instances from reservations
+ instances = []
+ for reservation in reservations['Reservations']:
+ instances = instances + reservation['Instances']
+
+ # Turn the boto3 result into ansible-friendly snake_case names
+ snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+ # Turn the boto3 result into an ansible-friendly tag dictionary
+ for instance in snaked_instances:
+ instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+ module.exit_json(instances=snaked_instances)
+
+
+def main():
+
+ argument_spec = dict(
+ instance_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['instance_ids', 'filters']
+ ],
+ supports_check_mode=True,
+ )
+ if module._name == 'ec2_instance_facts':
+ module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
new file mode 100644
index 00000000..4553a8e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
@@ -0,0 +1,707 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_launch_template
+version_added: 1.0.0
+short_description: Manage EC2 launch templates
+description:
+ - Create, modify, and delete EC2 Launch Templates, which can be used to
+ create individual instances or with Autoscaling Groups.
+ - Instead of specifying all parameters on the M(community.aws.ec2_instance) and
+ M(community.aws.ec2_asg) tasks, those modules can be passed a Launch Template
+ which contains settings like instance size, disk type, subnet, and more.
+requirements:
+ - botocore
+ - boto3 >= 1.6.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author:
+ - Ryan Scott Brown (@ryansb)
+options:
+ template_id:
+ description:
+ - The ID of the launch template. It can be used for all operations except creating a new Launch Template.
+ aliases: [id]
+ type: str
+ template_name:
+ description:
+ - The template name. This must be unique in the region-account combination you are using.
+ aliases: [name]
+ type: str
+ default_version:
+ description:
+ - The version to make the default when users launch new instances from this template. By default, the latest version becomes the default.
+ type: str
+ default: latest
+ state:
+ description:
+ - Whether the launch template should exist or not.
+ - Deleting specific versions of a launch template is not supported at this time.
+ choices: [present, absent]
+ default: present
+ type: str
+ block_device_mappings:
+ description:
+ - The block device mapping. Supplying both a snapshot ID and an encryption
+ value as arguments for block-device mapping results in an error. This is
+ because only blank volumes can be encrypted on start, and these are not
+ created from a snapshot. If a snapshot is the basis for the volume, it
+ contains data by definition and its encryption status cannot be changed
+ using this action.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ description: The device name (for example, /dev/sdh or xvdh).
+ type: str
+ no_device:
+ description: Suppresses the specified device included in the block device mapping of the AMI.
+ type: str
+ virtual_name:
+ description: >
+ The virtual device name (ephemeralN). Instance store volumes are
+ numbered starting from 0. An instance type with 2 available instance
+ store volumes can specify mappings for ephemeral0 and ephemeral1. The
+ number of available instance store volumes depends on the instance
+ type. After you connect to the instance, you must mount the volume.
+ type: str
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ type: dict
+ suboptions:
+ delete_on_termination:
+ description: Indicates whether the EBS volume is deleted on instance termination.
+ type: bool
+ encrypted:
+ description: >
+ Indicates whether the EBS volume is encrypted. Encrypted volumes
+ can only be attached to instances that support Amazon EBS
+ encryption. If you are creating a volume from a snapshot, you
+ can't specify an encryption value.
+ type: bool
+ iops:
+ description:
+ - The number of I/O operations per second (IOPS) that the volume
+ supports. For io1, this represents the number of IOPS that are
+ provisioned for the volume. For gp2, this represents the baseline
+ performance of the volume and the rate at which the volume
+ accumulates I/O credits for bursting. For more information about
+ General Purpose SSD baseline performance, I/O credits, and
+ bursting, see Amazon EBS Volume Types in the Amazon Elastic
+ Compute Cloud User Guide.
+ - >
+ Condition: This parameter is required for requests to create io1
+ volumes; it is not used in requests to create gp2, st1, sc1, or
+ standard volumes.
+ type: int
+ kms_key_id:
+ description: The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.
+ type: str
+ snapshot_id:
+ description: The ID of the snapshot to create the volume from.
+ type: str
+ volume_size:
+ description:
+ - The size of the volume, in GiB.
+ - "Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size."
+ type: int
+ volume_type:
+ description: The volume type
+ type: str
+ cpu_options:
+ description:
+ - Choose CPU settings for the EC2 instances that will be created with this template.
+ - For more information, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
+ type: dict
+ suboptions:
+ core_count:
+ description: The number of CPU cores for the instance.
+ type: int
+ threads_per_core:
+ description: >
+ The number of threads per CPU core. To disable Intel Hyper-Threading
+ Technology for the instance, specify a value of 1. Otherwise, specify
+ the default value of 2.
+ type: int
+ credit_specification:
+ description: The credit option for CPU usage of the instance. Valid for T2 or T3 instances only.
+ type: dict
+ suboptions:
+ cpu_credits:
+ description: >
+ The credit option for CPU usage of a T2 or T3 instance. Valid values
+ are C(standard) and C(unlimited).
+ type: str
+ disable_api_termination:
+ description: >
+ This helps protect instances from accidental termination. If set to true,
+ you can't terminate the instance using the Amazon EC2 console, CLI, or
+ API. To change this attribute to false after launch, use
+ I(ModifyInstanceAttribute).
+ type: bool
+ ebs_optimized:
+ description: >
+ Indicates whether the instance is optimized for Amazon EBS I/O. This
+ optimization provides dedicated throughput to Amazon EBS and an optimized
+ configuration stack to provide optimal Amazon EBS I/O performance. This
+ optimization isn't available with all instance types. Additional usage
+ charges apply when using an EBS-optimized instance.
+ type: bool
+ elastic_gpu_specifications:
+ type: list
+ elements: dict
+ description: Settings for Elastic GPU attachments. See U(https://aws.amazon.com/ec2/elastic-gpus/) for details.
+ suboptions:
+ type:
+ description: The type of Elastic GPU to attach
+ type: str
+ iam_instance_profile:
+ description: >
+ The name or ARN of an IAM instance profile. Requires permissions to
+ describe existing instance roles to confirm ARN is properly formed.
+ type: str
+ image_id:
+ description: >
+ The AMI ID to use for new instances launched with this template. This
+ value is region-dependent since AMIs are not global resources.
+ type: str
+ instance_initiated_shutdown_behavior:
+ description: >
+ Indicates whether an instance stops or terminates when you initiate
+ shutdown from the instance using the operating system shutdown command.
+ choices: [stop, terminate]
+ type: str
+ instance_market_options:
+ description: Options for alternative instance markets, currently only the spot market is supported.
+ type: dict
+ suboptions:
+ market_type:
+ description: The market type. This should always be C(spot).
+ type: str
+ spot_options:
+ description: Spot-market specific settings.
+ type: dict
+ suboptions:
+ block_duration_minutes:
+ description: >
+ The required duration for the Spot Instances (also known as Spot
+ blocks), in minutes. This value must be a multiple of 60 (60,
+ 120, 180, 240, 300, or 360).
+ type: int
+ instance_interruption_behavior:
+ description: The behavior when a Spot Instance is interrupted. The default is C(terminate).
+ choices: [hibernate, stop, terminate]
+ type: str
+ max_price:
+ description: The highest hourly price you're willing to pay for this Spot Instance.
+ type: str
+ spot_instance_type:
+ description: The request type to send.
+ choices: [one-time, persistent]
+ type: str
+ instance_type:
+ description: >
+ The instance type, such as C(c5.2xlarge). For a full list of instance types, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ type: str
+ kernel_id:
+ description: >
+ The ID of the kernel. We recommend that you use PV-GRUB instead of
+ kernels and RAM disks. For more information, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
+ type: str
+ key_name:
+ description:
+ - The name of the key pair. You can create a key pair using M(amazon.aws.ec2_key).
+ - If you do not specify a key pair, you can't connect to the instance
+ unless you choose an AMI that is configured to allow users another way to
+ log in.
+ type: str
+ monitoring:
+ description: Settings for instance monitoring.
+ type: dict
+ suboptions:
+ enabled:
+ type: bool
+ description: Whether to turn on detailed monitoring for new instances. This will incur extra charges.
+ network_interfaces:
+ description: One or more network interfaces.
+ type: list
+ elements: dict
+ suboptions:
+ associate_public_ip_address:
+ description: Associates a public IPv4 address with eth0 for a new network interface.
+ type: bool
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ type: bool
+ description:
+ description: A description for the network interface.
+ type: str
+ device_index:
+ description: The device index for the network interface attachment.
+ type: int
+ groups:
+ description: List of security group IDs to include on this instance.
+ type: list
+ elements: str
+ ipv6_address_count:
+ description: >
+ The number of IPv6 addresses to assign to a network interface. Amazon
+ EC2 automatically selects the IPv6 addresses from the subnet range.
+ You can't use this option if specifying the I(ipv6_addresses) option.
+ type: int
+ ipv6_addresses:
+ description: >
+ A list of one or more specific IPv6 addresses from the IPv6 CIDR
+ block range of your subnet. You can't use this option if you're
+ specifying the I(ipv6_address_count) option.
+ type: list
+ elements: str
+ network_interface_id:
+ description: The ID of an existing network interface (ENI) to attach.
+ type: str
+ private_ip_address:
+ description: The primary private IPv4 address of the network interface.
+ type: str
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ type: str
+ placement:
+ description: The placement group settings for the instance.
+ type: dict
+ suboptions:
+ affinity:
+ description: The affinity setting for an instance on a Dedicated Host.
+ type: str
+ availability_zone:
+ description: The Availability Zone for the instance.
+ type: str
+ group_name:
+ description: The name of the placement group for the instance.
+ type: str
+ host_id:
+ description: The ID of the Dedicated Host for the instance.
+ type: str
+ tenancy:
+ description: >
+ The tenancy of the instance (if the instance is running in a VPC). An
+ instance with a tenancy of dedicated runs on single-tenant hardware.
+ type: str
+ ram_disk_id:
+ description: >
+ The ID of the RAM disk to launch the instance with. We recommend that you
+ use PV-GRUB instead of kernels and RAM disks. For more information, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
+ type: str
+ security_group_ids:
+ description: A list of security group IDs (VPC or EC2-Classic) that the new instances will be added to.
+ type: list
+ elements: str
+ security_groups:
+ description: >
+ A list of security group names (Default VPC or EC2-Classic) that the new instances will be added to.
+ For any VPC other than Default, you must use I(security_group_ids).
+ type: list
+ elements: str
+ tags:
+ type: dict
+ description:
+ - A set of key-value pairs to be applied to resources when this Launch Template is used.
+ - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)"
+ - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters."
+ user_data:
+ description: >
+ The Base64-encoded user data to make available to the instance. For more information, see the Linux
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Windows
+ U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
+ documentation on user-data.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an ec2 launch template
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ image_id: "ami-04b762b4289fba92b"
+ key_name: my_ssh_key
+ instance_type: t2.micro
+ iam_instance_profile: myTestProfile
+ disable_api_termination: true
+
+- name: >
+ Create a new version of an existing ec2 launch template with a different instance type,
+ while leaving an older version as the default version
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ default_version: 1
+ instance_type: c5.4xlarge
+
+- name: Delete an ec2 launch template
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ state: absent
+
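+# A hedged sketch combining several of the documented options; the AMI, subnet
+# and security group IDs below are placeholders:
+- name: Create a launch template with an encrypted volume and a public IP on eth0
+  community.aws.ec2_launch_template:
+    name: "my_template"
+    image_id: "ami-04b762b4289fba92b"
+    instance_type: t3.micro
+    block_device_mappings:
+      - device_name: /dev/sda1
+        ebs:
+          volume_size: 20
+          volume_type: gp2
+          encrypted: true
+          delete_on_termination: true
+    network_interfaces:
+      - device_index: 0
+        associate_public_ip_address: true
+        subnet_id: subnet-0123456789abcdef0
+        groups:
+          - sg-0123456789abcdef0
+    tags:
+      Environment: testing
+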
+# This module does not yet allow deletion of specific versions of launch templates
+'''
+
+RETURN = '''
+latest_version:
+ description: Latest available version of the launch template
+ returned: when state=present
+ type: int
+default_version:
+ description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always.
+ returned: when state=present
+ type: int
+'''
+import re
+from uuid import uuid4
+
+from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
+ AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def determine_iam_role(module, name_or_arn):
+ if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+ return {'arn': name_or_arn}
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return {'arn': role['InstanceProfile']['Arn']}
+ except is_boto3_error_code('NoSuchEntity') as e:
+ module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def existing_templates(module):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ matches = None
+ try:
+ if module.params.get('template_id'):
+ matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
+ elif module.params.get('template_name'):
+ matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
+ except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
+ # no named template was found, return nothing/empty versions
+ return None, []
+ except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
+ module.params.get('template_id')))
+ except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e, msg='Launch template with ID {0} could not be found, please supply a name '
+ 'instead so that a new template can be created'.format(module.params.get('template_id')))
+ except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
+ else:
+ template = matches['LaunchTemplates'][0]
+ template_id = template['LaunchTemplateId']
+ try:
+ return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
+ except (ClientError, BotoCoreError, WaiterError) as e:
+ module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
+
+
+def params_to_launch_data(module, template_params):
+ if template_params.get('tags'):
+ template_params['tag_specifications'] = [
+ {
+ 'resource_type': r_type,
+ 'tags': [
+ {'Key': k, 'Value': v} for k, v
+ in template_params['tags'].items()
+ ]
+ }
+ for r_type in ('instance', 'volume')
+ ]
+ del template_params['tags']
+ if module.params.get('iam_instance_profile'):
+ template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile'])
+ params = snake_dict_to_camel_dict(
+ dict((k, v) for k, v in template_params.items() if v is not None),
+ capitalize_first=True,
+ )
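+    # As a sketch of the tag handling above, tags={'env': 'dev'} produces, after
+    # camelization, TagSpecifications entries for both resource types:
+    #   [{'ResourceType': 'instance', 'Tags': [{'Key': 'env', 'Value': 'dev'}]},
+    #    {'ResourceType': 'volume', 'Tags': [{'Key': 'env', 'Value': 'dev'}]}]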
+ return params
+
+
+def delete_template(module):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ template, template_versions = existing_templates(module)
+ deleted_versions = []
+ if template or template_versions:
+ non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
+ if non_default_versions:
+ try:
+ v_resp = ec2.delete_launch_template_versions(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ Versions=non_default_versions,
+ )
+ if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
+ module.warn('Failed to delete template versions {0} on launch template {1}'.format(
+ v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
+ template['LaunchTemplateId'],
+ ))
+ deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
+ try:
+ resp = ec2.delete_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
+ return {
+ 'deleted_versions': deleted_versions,
+ 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
+ 'changed': True,
+ }
+ else:
+ return {'changed': False}
+
+
+def create_or_update(module, template_options):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound']))
+ template, template_versions = existing_templates(module)
+ out = {}
+ lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
+ if not (template or template_versions):
+ # create a full new one
+ try:
+ resp = ec2.create_launch_template(
+ LaunchTemplateName=module.params['template_name'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create launch template")
+ template, template_versions = existing_templates(module)
+ out['changed'] = True
+ elif template and template_versions:
+ most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1]
+ if lt_data == most_recent['LaunchTemplateData']:
+ out['changed'] = False
+ return out
+ try:
+ resp = ec2.create_launch_template_version(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ if module.params.get('default_version') in (None, ''):
+ # no need to do anything, leave the existing version as default
+ pass
+ elif module.params.get('default_version') == 'latest':
+ set_default = ec2.modify_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ else:
+ try:
+ int(module.params.get('default_version'))
+ except ValueError:
+ module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version')))
+ set_default = ec2.modify_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ DefaultVersion=to_text(int(module.params.get('default_version'))),
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
+ template, template_versions = existing_templates(module)
+ out['changed'] = True
+ return out
+
+
+def format_module_output(module):
+ output = {}
+ template, template_versions = existing_templates(module)
+ template = camel_dict_to_snake_dict(template)
+ template_versions = [camel_dict_to_snake_dict(v) for v in template_versions]
+ for v in template_versions:
+ for ts in (v['launch_template_data'].get('tag_specifications') or []):
+ ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags'))
+ output.update(dict(template=template, versions=template_versions))
+ output['default_template'] = [
+ v for v in template_versions
+ if v.get('default_version')
+ ][0]
+ output['latest_template'] = [
+ v for v in template_versions
+ if (
+ v.get('version_number') and
+ int(v['version_number']) == int(template['latest_version_number'])
+ )
+ ][0]
+ if "version_number" in output['default_template']:
+ output['default_version'] = output['default_template']['version_number']
+ if "version_number" in output['latest_template']:
+ output['latest_version'] = output['latest_template']['version_number']
+ return output
+
+
+def main():
+ template_options = dict(
+ block_device_mappings=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ device_name=dict(),
+ ebs=dict(
+ type='dict',
+ options=dict(
+ delete_on_termination=dict(type='bool'),
+ encrypted=dict(type='bool'),
+ iops=dict(type='int'),
+ kms_key_id=dict(),
+ snapshot_id=dict(),
+ volume_size=dict(type='int'),
+ volume_type=dict(),
+ ),
+ ),
+ no_device=dict(),
+ virtual_name=dict(),
+ ),
+ ),
+ cpu_options=dict(
+ type='dict',
+ options=dict(
+ core_count=dict(type='int'),
+ threads_per_core=dict(type='int'),
+ ),
+ ),
+ credit_specification=dict(
+ type='dict',
+ options=dict(
+ cpu_credits=dict(),
+ ),
+ ),
+ disable_api_termination=dict(type='bool'),
+ ebs_optimized=dict(type='bool'),
+ elastic_gpu_specifications=dict(
+ type='list',
+ elements='dict',
+ options=dict(type=dict()),
+ ),
+ iam_instance_profile=dict(),
+ image_id=dict(),
+ instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']),
+ instance_market_options=dict(
+ type='dict',
+ options=dict(
+ market_type=dict(),
+ spot_options=dict(
+ type='dict',
+ options=dict(
+ block_duration_minutes=dict(type='int'),
+ instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']),
+ max_price=dict(),
+ spot_instance_type=dict(choices=['one-time', 'persistent']),
+ ),
+ ),
+ ),
+ ),
+ instance_type=dict(),
+ kernel_id=dict(),
+ key_name=dict(),
+ monitoring=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool')
+ ),
+ ),
+ network_interfaces=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ associate_public_ip_address=dict(type='bool'),
+ delete_on_termination=dict(type='bool'),
+ description=dict(),
+ device_index=dict(type='int'),
+ groups=dict(type='list', elements='str'),
+ ipv6_address_count=dict(type='int'),
+ ipv6_addresses=dict(type='list', elements='str'),
+ network_interface_id=dict(),
+ private_ip_address=dict(),
+ subnet_id=dict(),
+ ),
+ ),
+ placement=dict(
+ options=dict(
+ affinity=dict(),
+ availability_zone=dict(),
+ group_name=dict(),
+ host_id=dict(),
+ tenancy=dict(),
+ ),
+ type='dict',
+ ),
+ ram_disk_id=dict(),
+ security_group_ids=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ tags=dict(type='dict'),
+ user_data=dict(),
+ )
+
+ arg_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ template_name=dict(aliases=['name']),
+ template_id=dict(aliases=['id']),
+ default_version=dict(default='latest'),
+ )
+
+ arg_spec.update(template_options)
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ required_one_of=[
+ ('template_name', 'template_id')
+ ],
+ supports_check_mode=True
+ )
+
+ if not module.boto3_at_least('1.6.0'):
+ module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")
+
+ for interface in (module.params.get('network_interfaces') or []):
+ if interface.get('ipv6_addresses'):
+ interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]
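+            # e.g. ['2001:db8::1'] becomes [{'ipv6_address': '2001:db8::1'}],
+            # the nested shape the launch template data expects after camelization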
+
+ if module.params.get('state') == 'present':
+ out = create_or_update(module, template_options)
+ out.update(format_module_output(module))
+ elif module.params.get('state') == 'absent':
+ out = delete_template(module)
+ else:
+ module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))
+
+ module.exit_json(**out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc.py
new file mode 100644
index 00000000..7555cf68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc.py
@@ -0,0 +1,695 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_lc
+version_added: 1.0.0
+
+short_description: Create or delete AWS Autoscaling Launch Configurations
+
+description:
+ - Can create or delete AWS Autoscaling Configurations.
+ - Works with the ec2_asg module to manage Autoscaling Groups.
+
+notes:
+ - Amazon Auto Scaling Launch Configurations are immutable once created. Modifying any parameter after creation will not update the
+ launch configuration on AWS. You must create a new launch configuration and assign it to the ASG instead.
+ - Encrypted volumes are supported on Ansible versions >= 2.4.
+
+
+author:
+ - "Gareth Rushgrove (@garethr)"
+ - "Willem van Ketwich (@wilvk)"
+
+options:
+ state:
+ description:
+ - Whether the launch configuration should be created (C(present)) or deleted (C(absent)).
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ name:
+ description:
+ - Unique name for configuration.
+ required: true
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance.
+ - Required when creating a new Launch Configuration.
+ type: str
+ image_id:
+ description:
+ - The AMI unique identifier to be used for the group.
+ type: str
+ key_name:
+ description:
+ - The SSH key name to be used for access to managed instances.
+ type: str
+ security_groups:
+ description:
+ - A list of security groups to apply to the instances. Since Ansible 2.4 you can specify either security group names or IDs, or a mix of both.
+ Prior to 2.4, for VPC instances you had to specify security group IDs, and for EC2-Classic either security group names or IDs.
+ type: list
+ elements: str
+ volumes:
+ description:
+ - A list of dictionaries defining the volumes to create.
+ - For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ description:
+ - The name for the volume (for example C(/dev/sda)).
+ required: true
+ no_device:
+ type: bool
+ description:
+ - When I(no_device=true) the device will not be created.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ ephemeral:
+ type: str
+ description:
+ - The name of the ephemeral (instance store) volume (for example C(ephemeral0)).
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ - Required unless one of I(ephemeral), I(snapshot) or I(no_device) is set.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance
+ is terminated.
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the ec2 instance. Mutually exclusive with I(user_data_path).
+ type: str
+ user_data_path:
+ description:
+ - Path to the file that contains userdata for the ec2 instances. Mutually exclusive with I(user_data).
+ type: path
+ kernel_id:
+ description:
+ - Kernel id for the EC2 instance.
+ type: str
+ spot_price:
+ description:
+ - The spot price you are bidding. Only applies for an autoscaling group with spot instances.
+ type: float
+ instance_monitoring:
+ description:
+ - Specifies whether instances are launched with detailed monitoring.
+ type: bool
+ default: false
+ assign_public_ip:
+ description:
+ - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address
+ to each instance launched in an Amazon VPC.
+ type: bool
+ ramdisk_id:
+ description:
+ - A RAM disk ID for the instances.
+ type: str
+ instance_profile_name:
+ description:
+ - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
+ type: str
+ ebs_optimized:
+ description:
+ - Specifies whether the instance is optimized for EBS I/O (true) or not (false).
+ default: false
+ type: bool
+ classic_link_vpc_id:
+ description:
+ - The ID of the ClassicLink-enabled VPC.
+ type: str
+ classic_link_vpc_security_groups:
+ description:
+ - A list of security group IDs with which to associate the ClassicLink VPC instances.
+ type: list
+ elements: str
+ vpc_id:
+ description:
+ - VPC ID, used when resolving security group names to IDs.
+ type: str
+ instance_id:
+ description:
+ - The ID of a running instance to use as a basis for a launch configuration. Can be used in place of I(image_id) and I(instance_type).
+ type: str
+ placement_tenancy:
+ description:
+ - Determines whether the instance runs on single-tenant hardware or not.
+ - When not set AWS will default to C(default).
+ type: str
+ choices: ['default', 'dedicated']
+ associate_public_ip_address:
+ description:
+ - The I(associate_public_ip_address) option does nothing and will be removed after 2022-06-01.
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+requirements:
+ - boto3 >= 1.4.4
+
+'''
+
+EXAMPLES = r'''
+
+# Create a launch configuration using an AMI image and instance type as a basis
+
+- name: Create a launch configuration with an encrypted volume (encrypted volumes require Ansible >= 2.4)
+ community.aws.ec2_lc:
+ name: special
+ image_id: ami-XXX
+ key_name: default
+ security_groups: ['group', 'group2' ]
+ instance_type: t1.micro
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 100
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+ encrypted: true
+ - device_name: /dev/sdb
+ ephemeral: ephemeral0
+
+- name: create a launch configuration using a running instance id as a basis
+ community.aws.ec2_lc:
+ name: special
+ instance_id: i-00a48b207ec59e948
+ key_name: default
+ security_groups: ['launch-wizard-2' ]
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 120
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+
+- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
+ community.aws.ec2_lc:
+ name: special
+ image_id: ami-XXX
+ key_name: default
+ security_groups: ['group', 'group2' ]
+ instance_type: t1.micro
+ volumes:
+ - device_name: /dev/sdf
+ no_device: true
+
+- name: Use EBS snapshot ID for volume
+  block:
+    - name: Set Volume Facts
+      ansible.builtin.set_fact:
+        volumes:
+          - device_name: /dev/sda1
+            volume_size: 20
+            snapshot: snap-XXXX
+            volume_type: gp2
+            delete_on_termination: true
+            encrypted: false
+
+    - name: Create launch configuration
+      community.aws.ec2_lc:
+        name: lc1
+        image_id: ami-xxxx
+        assign_public_ip: true
+        instance_type: t2.medium
+        key_name: my-key
+        security_groups: ['sg-xxxx']
+        volumes: "{{ volumes }}"
+      register: lc_info
+'''
+
+RETURN = r'''
+arn:
+ description: The Amazon Resource Name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+changed:
+ description: Whether the state of the launch configuration has changed.
+ returned: always
+ type: bool
+ sample: false
+created_time:
+ description: The creation date and time for the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: '2017-11-03 23:46:44.841000'
+image_id:
+ description: The ID of the Amazon Machine Image used by the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: ami-9be6f38c
+instance_type:
+ description: The instance type for the instances.
+ returned: when I(state=present)
+ type: str
+ sample: t1.micro
+name:
+ description: The name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: launch_config_name
+result:
+ description: The specification details for the launch configuration.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ PlacementTenancy:
+ description: The tenancy of the instances, either default or dedicated.
+ returned: when I(state=present)
+ type: str
+ sample: default
+ associate_public_ip_address:
+ description: (EC2-VPC) Indicates whether to assign a public IP address to each instance.
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ block_device_mappings:
+ description: A block device mapping, which specifies the block devices.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
+ returned: when I(state=present)
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: The information about the Amazon EBS volume.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ snapshot_id:
+ description: The ID of the snapshot.
+ returned: when I(state=present)
+ type: str
+ volume_size:
+ description: The volume size, in GiB.
+ returned: when I(state=present)
+ type: str
+ sample: '100'
+ virtual_name:
+ description: The name of the virtual device (for example, ephemeral0).
+ returned: when I(state=present)
+ type: str
+ sample: ephemeral0
+ classic_link_vpc_id:
+ description: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
+ returned: when I(state=present)
+ type: str
+ classic_link_vpc_security_groups:
+ description: The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
+ returned: when I(state=present)
+ type: list
+ sample: []
+ created_time:
+ description: The creation date and time for the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: '2017-11-03 23:46:44.841000'
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: when I(state=present)
+ type: bool
+ sample: true
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O (true) or not (false).
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ image_id:
+ description: The ID of the Amazon Machine Image used by the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: ami-9be6f38c
+ instance_monitoring:
+ description: Indicates whether instances in this group are launched with detailed (true) or basic (false) monitoring.
+ returned: when I(state=present)
+ type: bool
+ sample: true
+ instance_profile_name:
+ description: The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.
+ returned: when I(state=present)
+ type: str
+ sample: null
+ instance_type:
+ description: The instance type for the instances.
+ returned: when I(state=present)
+ type: str
+ sample: t1.micro
+ iops:
+ description: The number of I/O operations per second (IOPS) to provision for the volume.
+ returned: when I(state=present)
+ type: int
+ kernel_id:
+ description: The ID of the kernel associated with the AMI.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ key_name:
+ description: The name of the key pair.
+ returned: when I(state=present)
+ type: str
+ sample: testkey
+ launch_configuration_arn:
+ description: The Amazon Resource Name (ARN) of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+ member:
+ description: ""
+ returned: when I(state=present)
+ type: str
+ sample: "\n "
+ name:
+ description: The name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: launch_config_name
+ ramdisk_id:
+ description: The ID of the RAM disk associated with the AMI.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ security_groups:
+ description: The security groups to associate with the instances.
+ returned: when I(state=present)
+ type: list
+ sample:
+ - sg-5e27db2f
+ spot_price:
+ description: The price to bid when launching Spot Instances.
+ returned: when I(state=present)
+ type: float
+ use_block_device_types:
+ description: Indicates whether to suppress a device mapping.
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ user_data:
+ description: The user data available to the instances.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ volume_type:
+ description: The volume type (one of standard, io1, gp2).
+ returned: when I(state=present)
+ type: str
+ sample: io1
+security_groups:
+ description: The security groups to associate with the instances.
+ returned: when I(state=present)
+ type: list
+ sample:
+ - sg-5e27db2f
+
+'''
+
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+
+
+def create_block_device_meta(module, volume):
+ if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume.get('volume_type') == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+
+ return_object = {}
+
+ if 'ephemeral' in volume:
+ return_object['VirtualName'] = volume.get('ephemeral')
+
+ if 'device_name' in volume:
+ return_object['DeviceName'] = volume.get('device_name')
+
+ if 'no_device' in volume:
+ return_object['NoDevice'] = volume.get('no_device')
+
+ if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']):
+ return_object['Ebs'] = {}
+
+ if 'snapshot' in volume:
+ return_object['Ebs']['SnapshotId'] = volume.get('snapshot')
+
+ if 'volume_size' in volume:
+ return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0))
+
+ if 'volume_type' in volume:
+ return_object['Ebs']['VolumeType'] = volume.get('volume_type')
+
+ if 'delete_on_termination' in volume:
+ return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False)
+
+ if 'iops' in volume:
+ return_object['Ebs']['Iops'] = volume.get('iops')
+
+ if 'encrypted' in volume:
+ return_object['Ebs']['Encrypted'] = volume.get('encrypted')
+
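+    # As a sketch of the mapping this function performs, a volume entry such as
+    #   {'device_name': '/dev/sda1', 'volume_size': 100, 'volume_type': 'io1',
+    #    'iops': 3000, 'delete_on_termination': True, 'encrypted': True}
+    # comes out as
+    #   {'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 100, 'VolumeType': 'io1',
+    #    'Iops': 3000, 'DeleteOnTermination': True, 'Encrypted': True}}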
+ return return_object
+
+
+def create_launch_config(connection, module):
+ name = module.params.get('name')
+ vpc_id = module.params.get('vpc_id')
+ try:
+ ec2_connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+ try:
+ security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to get Security Group IDs')
+ except ValueError as e:
+ module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc())
+ user_data = module.params.get('user_data')
+ user_data_path = module.params.get('user_data_path')
+ volumes = module.params['volumes']
+ instance_monitoring = module.params.get('instance_monitoring')
+ assign_public_ip = module.params.get('assign_public_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ ebs_optimized = module.params.get('ebs_optimized')
+ classic_link_vpc_id = module.params.get('classic_link_vpc_id')
+ classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
+
+ block_device_mapping = []
+
+ convert_list = ['image_id', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price']
+
+ launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)))
+
+ if user_data_path:
+ try:
+ with open(user_data_path, 'r') as user_data_file:
+ user_data = user_data_file.read()
+ except IOError as e:
+ module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc())
+
+ if volumes:
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+ # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ block_device_mapping.append(create_block_device_meta(module, volume))
+
+ try:
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to describe launch configuration by name", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ changed = False
+ result = {}
+
+ launch_config['LaunchConfigurationName'] = name
+
+ if security_groups is not None:
+ launch_config['SecurityGroups'] = security_groups
+
+ if classic_link_vpc_id is not None:
+ launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
+
+ if instance_monitoring is not None:
+ launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
+
+ if classic_link_vpc_security_groups is not None:
+ launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
+
+ if block_device_mapping:
+ launch_config['BlockDeviceMappings'] = block_device_mapping
+
+ if instance_profile_name is not None:
+ launch_config['IamInstanceProfile'] = instance_profile_name
+
+ if assign_public_ip is not None:
+ launch_config['AssociatePublicIpAddress'] = assign_public_ip
+
+ if user_data is not None:
+ launch_config['UserData'] = user_data
+
+ if ebs_optimized is not None:
+ launch_config['EbsOptimized'] = ebs_optimized
+
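+    # Create the launch configuration only when no existing one matches the name, then re-describe it to report its attributes.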
+ if len(launch_configs) == 0:
+ try:
+ connection.create_launch_configuration(**launch_config)
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ changed = True
+ if launch_configs:
+ launch_config = launch_configs[0]
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ result = (dict((k, v) for k, v in launch_config.items()
+ if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
+
+ result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
+
+ try:
+ result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled'))
+ except AttributeError:
+ result['InstanceMonitoring'] = False
+
+ result['BlockDeviceMappings'] = []
+
+ for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
+ result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName')))
+ if block_device_mapping.get('Ebs') is not None:
+ result['BlockDeviceMappings'][-1]['ebs'] = dict(
+ snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))
+
+ if user_data_path:
+ result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal
+
+ return_object = {
+ 'Name': result.get('LaunchConfigurationName'),
+ 'CreatedTime': result.get('CreatedTime'),
+ 'ImageId': result.get('ImageId'),
+ 'Arn': result.get('LaunchConfigurationARN'),
+ 'SecurityGroups': result.get('SecurityGroups'),
+ 'InstanceType': result.get('InstanceType'),
+ 'Result': result
+ }
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
+
+
+def delete_launch_config(connection, module):
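+    # Idempotent delete: only call the API when a matching launch configuration exists.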
+ try:
+ name = module.params.get('name')
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ if launch_configs:
+ connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName'))
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Failed to delete launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ image_id=dict(),
+ instance_id=dict(),
+ key_name=dict(),
+ security_groups=dict(default=[], type='list', elements='str'),
+ user_data=dict(),
+ user_data_path=dict(type='path'),
+ kernel_id=dict(),
+ volumes=dict(type='list', elements='dict'),
+ instance_type=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ spot_price=dict(type='float'),
+ ramdisk_id=dict(),
+ instance_profile_name=dict(),
+ ebs_optimized=dict(default=False, type='bool'),
+ associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ instance_monitoring=dict(default=False, type='bool'),
+ assign_public_ip=dict(type='bool'),
+ classic_link_vpc_security_groups=dict(type='list', elements='str'),
+ classic_link_vpc_id=dict(),
+ vpc_id=dict(),
+ placement_tenancy=dict(choices=['default', 'dedicated'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['user_data', 'user_data_path']],
+ )
+
+ try:
+ connection = module.client('autoscaling')
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="unable to establish connection - " + str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_launch_config(connection, module)
+ elif state == 'absent':
+ delete_launch_config(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_facts.py
new file mode 100644
index 00000000..1d680c37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_facts.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_lc_info
+version_added: 1.0.0
+short_description: Gather information about AWS Autoscaling Launch Configurations.
+description:
+ - Gather information about AWS Autoscaling Launch Configurations.
+ - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change.
+author: "Loïc Latreille (@psykotox)"
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+      - A name or a list of names to match.
+ default: []
+ type: list
+ elements: str
+ sort:
+ description:
+      - Optional attribute by which to sort the results.
+ choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+ type: str
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ type: str
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all launch configurations
+ community.aws.ec2_lc_info:
+
+- name: Gather information about launch configuration with name "example"
+ community.aws.ec2_lc_info:
+ name: example
+
+- name: Gather information sorted by created_time from most recent to least recent
+ community.aws.ec2_lc_info:
+ sort: created_time
+ sort_order: descending
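+
+# An illustrative sliced query: sort_end corresponds to Python's list[:3].
+- name: Gather information about the three most recently created launch configurations
+  community.aws.ec2_lc_info:
+    sort: created_time
+    sort_order: descending
+    sort_end: 3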
+'''
+
+RETURN = r'''
+block_device_mapping:
+  description: Block device mappings for the instances of the launch configuration
+  type: list
+  returned: always
+  sample: "[{
+      'device_name': '/dev/xvda',
+      'ebs': {
+        'delete_on_termination': true,
+        'volume_size': 8,
+        'volume_type': 'gp2'
+      }
+  }]"
+classic_link_vpc_security_groups:
+ description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
+ type: str
+ returned: always
+ sample:
+created_time:
+ description: The creation date and time for the launch configuration
+ type: str
+ returned: always
+ sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+  description: EBS I/O optimized (true) or not (false)
+ type: bool
+ returned: always
+  sample: true
+image_id:
+ description: ID of the Amazon Machine Image (AMI)
+ type: str
+ returned: always
+ sample: "ami-12345678"
+instance_monitoring:
+ description: Launched with detailed monitoring or not
+ type: dict
+ returned: always
+ sample: "{
+ 'enabled': true
+ }"
+instance_type:
+ description: Instance type
+ type: str
+ returned: always
+ sample: "t2.micro"
+kernel_id:
+ description: ID of the kernel associated with the AMI
+ type: str
+ returned: always
+ sample:
+key_name:
+ description: Name of the key pair
+ type: str
+ returned: always
+ sample: "user_app"
+launch_configuration_arn:
+ description: Amazon Resource Name (ARN) of the launch configuration
+ type: str
+ returned: always
+ sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+ description: Name of the launch configuration
+ type: str
+ returned: always
+ sample: "lc-app"
+ramdisk_id:
+ description: ID of the RAM disk associated with the AMI
+ type: str
+ returned: always
+ sample:
+security_groups:
+  description: Security groups associated with the launch configuration
+ type: list
+ returned: always
+ sample: "[
+ 'web'
+ ]"
+user_data:
+  description: User data available to the launched instances
+ type: str
+ returned: always
+ sample:
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_launch_configs(connection, module):
+
+ launch_config_name = module.params.get("name")
+ sort = module.params.get('sort')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+
+ try:
+ pg = connection.get_paginator('describe_launch_configurations')
+ launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list launch configs")
+
+ snaked_launch_configs = []
+ for launch_config in launch_configs['LaunchConfigurations']:
+ snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
+
+ for launch_config in snaked_launch_configs:
+        if 'created_time' in launch_config:
+            launch_config['created_time'] = str(launch_config['created_time'])
+
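+    # Optionally sort on the requested attribute, then trim the list using Python slice semantics (sort_start/sort_end).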
+ if sort:
+ snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
+
+ if sort and sort_start and sort_end:
+ snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
+ elif sort and sort_start:
+ snaked_launch_configs = snaked_launch_configs[sort_start:]
+ elif sort and sort_end:
+ snaked_launch_configs = snaked_launch_configs[:sort_end]
+
+ module.exit_json(launch_configurations=snaked_launch_configs)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False, default=[], type='list', elements='str'),
+ sort=dict(required=False, default=None,
+ choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+ sort_order=dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start=dict(required=False, type='int'),
+ sort_end=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'ec2_lc_facts':
+ module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_launch_configs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_find.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_find.py
new file mode 100644
index 00000000..1db33a20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_find.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto <jose@armesto.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_lc_find
+version_added: 1.0.0
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+ - Returns list of matching Launch Configurations for a given name, along with other useful information.
+ - Results can be sorted and sliced.
+  - It depends on boto3.
+ - Based on the work by Tom Bamford U(https://github.com/tombamford)
+
+author: "Jose Armesto (@fiunchinho)"
+options:
+ name_regex:
+ description:
+      - A Launch Configuration name to match.
+      - It will be compiled as a regex.
+ required: True
+ type: str
+ sort_order:
+ description:
+ - Order in which to sort results.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ type: str
+ limit:
+ description:
+ - How many results to show.
+ - Corresponds to Python slice notation like list[:limit].
+ type: int
+requirements:
+ - "python >= 2.6"
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Search for the Launch Configurations that start with "app"
+ community.aws.ec2_lc_find:
+ name_regex: app.*
+ sort_order: descending
+ limit: 2
+'''
+
+RETURN = '''
+image_id:
+  description: AMI ID
+ returned: when Launch Configuration was found
+ type: str
+ sample: "ami-0d75df7e"
+user_data:
+ description: User data used to start instance
+ returned: when Launch Configuration was found
+ type: str
+ sample: "ZXhwb3J0IENMT1VE"
+name:
+ description: Name of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: str
+ sample: "myapp-v123"
+arn:
+  description: ARN of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: str
+ sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+ description: Type of ec2 instance
+ returned: when Launch Configuration was found
+ type: str
+ sample: "t2.small"
+created_time:
+ description: When it was created
+ returned: when Launch Configuration was found
+ type: str
+ sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+ description: Launch Configuration EBS optimized property
+ returned: when Launch Configuration was found
+ type: bool
+ sample: False
+instance_monitoring:
+ description: Launch Configuration instance monitoring property
+ returned: when Launch Configuration was found
+  type: dict
+ sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+ description: Launch Configuration classic link vpc security groups property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+block_device_mappings:
+ description: Launch Configuration block device mappings property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+keyname:
+  description: Launch Configuration SSH key name
+ returned: when Launch Configuration was found
+ type: str
+ sample: mykey
+security_groups:
+ description: Launch Configuration security groups
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+kernel_id:
+ description: Launch Configuration kernel to use
+ returned: when Launch Configuration was found
+ type: str
+ sample: ''
+ram_disk_id:
+  description: Launch Configuration RAM disk ID
+ returned: when Launch Configuration was found
+ type: str
+ sample: ''
+associate_public_address:
+ description: Assign public address or not
+ returned: when Launch Configuration was found
+ type: bool
+ sample: True
+...
+'''
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def find_launch_configs(client, module):
+ name_regex = module.params.get('name_regex')
+ sort_order = module.params.get('sort_order')
+ limit = module.params.get('limit')
+
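+    # Page through all launch configurations (up to 1000, in pages of 100); name filtering happens client-side below.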
+ paginator = client.get_paginator('describe_launch_configurations')
+
+ response_iterator = paginator.paginate(
+ PaginationConfig={
+ 'MaxItems': 1000,
+ 'PageSize': 100
+ }
+ )
+
+ results = []
+
+ for response in response_iterator:
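+        # Keep only configurations whose name matches name_regex; re.match anchors the pattern at the start of the name.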
+ response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),
+ response['LaunchConfigurations'])
+
+ for lc in response['LaunchConfigurations']:
+ data = {
+ 'name': lc['LaunchConfigurationName'],
+ 'arn': lc['LaunchConfigurationARN'],
+ 'created_time': lc['CreatedTime'],
+ 'user_data': lc['UserData'],
+ 'instance_type': lc['InstanceType'],
+ 'image_id': lc['ImageId'],
+ 'ebs_optimized': lc['EbsOptimized'],
+ 'instance_monitoring': lc['InstanceMonitoring'],
+ 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
+ 'block_device_mappings': lc['BlockDeviceMappings'],
+ 'keyname': lc['KeyName'],
+ 'security_groups': lc['SecurityGroups'],
+ 'kernel_id': lc['KernelId'],
+ 'ram_disk_id': lc['RamdiskId'],
+ 'associate_public_address': lc.get('AssociatePublicIpAddress', False),
+ }
+
+ results.append(data)
+
+ results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+
+ if limit:
+ results = results[:int(limit)]
+
+ module.exit_json(changed=False, results=results)
+
+
+def main():
+ argument_spec = dict(
+ name_regex=dict(required=True),
+ sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
+ limit=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ try:
+ client = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ find_launch_configs(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_info.py
new file mode 100644
index 00000000..1d680c37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_lc_info.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_lc_info
+version_added: 1.0.0
+short_description: Gather information about AWS Autoscaling Launch Configurations.
+description:
+ - Gather information about AWS Autoscaling Launch Configurations.
+ - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change.
+author: "Loïc Latreille (@psykotox)"
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+      - A name or a list of names to match.
+ default: []
+ type: list
+ elements: str
+ sort:
+ description:
+      - Optional attribute by which to sort the results.
+ choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+ type: str
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ type: str
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all launch configurations
+ community.aws.ec2_lc_info:
+
+- name: Gather information about launch configuration with name "example"
+ community.aws.ec2_lc_info:
+ name: example
+
+- name: Gather information sorted by created_time from most recent to least recent
+ community.aws.ec2_lc_info:
+ sort: created_time
+ sort_order: descending
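+
+# An illustrative sliced query: sort_end corresponds to Python's list[:3].
+- name: Gather information about the three most recently created launch configurations
+  community.aws.ec2_lc_info:
+    sort: created_time
+    sort_order: descending
+    sort_end: 3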
+'''
+
+RETURN = r'''
+block_device_mapping:
+  description: Block device mappings for the instances of the launch configuration
+  type: list
+  returned: always
+  sample: "[{
+      'device_name': '/dev/xvda',
+      'ebs': {
+        'delete_on_termination': true,
+        'volume_size': 8,
+        'volume_type': 'gp2'
+      }
+  }]"
+classic_link_vpc_security_groups:
+ description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
+ type: str
+ returned: always
+ sample:
+created_time:
+ description: The creation date and time for the launch configuration
+ type: str
+ returned: always
+ sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+  description: EBS I/O optimized (true) or not (false)
+ type: bool
+ returned: always
+  sample: true
+image_id:
+ description: ID of the Amazon Machine Image (AMI)
+ type: str
+ returned: always
+ sample: "ami-12345678"
+instance_monitoring:
+ description: Launched with detailed monitoring or not
+ type: dict
+ returned: always
+ sample: "{
+ 'enabled': true
+ }"
+instance_type:
+ description: Instance type
+ type: str
+ returned: always
+ sample: "t2.micro"
+kernel_id:
+ description: ID of the kernel associated with the AMI
+ type: str
+ returned: always
+ sample:
+key_name:
+ description: Name of the key pair
+ type: str
+ returned: always
+ sample: "user_app"
+launch_configuration_arn:
+ description: Amazon Resource Name (ARN) of the launch configuration
+ type: str
+ returned: always
+ sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+ description: Name of the launch configuration
+ type: str
+ returned: always
+ sample: "lc-app"
+ramdisk_id:
+ description: ID of the RAM disk associated with the AMI
+ type: str
+ returned: always
+ sample:
+security_groups:
+  description: Security groups associated with the launch configuration
+ type: list
+ returned: always
+ sample: "[
+ 'web'
+ ]"
+user_data:
+  description: User data available to the launched instances
+ type: str
+ returned: always
+ sample:
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_launch_configs(connection, module):
+
+ launch_config_name = module.params.get("name")
+ sort = module.params.get('sort')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+
+ try:
+ pg = connection.get_paginator('describe_launch_configurations')
+ launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list launch configs")
+
+ snaked_launch_configs = []
+ for launch_config in launch_configs['LaunchConfigurations']:
+ snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
+
+ for launch_config in snaked_launch_configs:
+        if 'created_time' in launch_config:
+            launch_config['created_time'] = str(launch_config['created_time'])
+
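+    # Optionally sort on the requested attribute, then trim the list using Python slice semantics (sort_start/sort_end).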
+ if sort:
+ snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
+
+ if sort and sort_start and sort_end:
+ snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
+ elif sort and sort_start:
+ snaked_launch_configs = snaked_launch_configs[sort_start:]
+ elif sort and sort_end:
+ snaked_launch_configs = snaked_launch_configs[:sort_end]
+
+ module.exit_json(launch_configurations=snaked_launch_configs)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False, default=[], type='list', elements='str'),
+ sort=dict(required=False, default=None,
+ choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+ sort_order=dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start=dict(required=False, type='int'),
+ sort_end=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'ec2_lc_facts':
+ module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_launch_configs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_metric_alarm.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_metric_alarm.py
new file mode 100644
index 00000000..09e95d2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_metric_alarm.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_metric_alarm
+short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
+version_added: 1.0.0
+description:
+ - Can create or delete AWS metric alarms.
+ - Metrics you wish to alarm on must already exist.
+author: "Zacharie Eakin (@Zeekin)"
+options:
+ state:
+ description:
+ - Register or deregister the alarm.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Unique name for the alarm.
+ required: true
+ type: str
+ metric:
+ description:
+ - Name of the monitored metric (e.g. C(CPUUtilization)).
+ - Metric must already exist.
+ required: false
+ type: str
+ namespace:
+ description:
+ - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in cloudwatch.
+ required: false
+ type: str
+ statistic:
+ description:
+ - Operation applied to the metric.
+ - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value.
+ required: false
+ choices: ['SampleCount','Average','Sum','Minimum','Maximum']
+ type: str
+ comparison:
+ description:
+      - Determines how the threshold value is compared.
+ - Symbolic comparison operators have been deprecated, and will be removed after 2022-06-22.
+ required: false
+ type: str
+ choices:
+ - 'GreaterThanOrEqualToThreshold'
+ - 'GreaterThanThreshold'
+ - 'LessThanThreshold'
+ - 'LessThanOrEqualToThreshold'
+ - '<='
+ - '<'
+ - '>='
+ - '>'
+ threshold:
+ description:
+ - Sets the min/max bound for triggering the alarm.
+ required: false
+ type: float
+ period:
+ description:
+ - The time (in seconds) between metric evaluations.
+ required: false
+ type: int
+ evaluation_periods:
+ description:
+ - The number of times in which the metric is evaluated before final calculation.
+ required: false
+ type: int
+ unit:
+ description:
+ - The threshold's unit of measurement.
+ required: false
+ type: str
+ choices:
+ - 'Seconds'
+ - 'Microseconds'
+ - 'Milliseconds'
+ - 'Bytes'
+ - 'Kilobytes'
+ - 'Megabytes'
+ - 'Gigabytes'
+ - 'Terabytes'
+ - 'Bits'
+ - 'Kilobits'
+ - 'Megabits'
+ - 'Gigabits'
+ - 'Terabits'
+ - 'Percent'
+ - 'Count'
+ - 'Bytes/Second'
+ - 'Kilobytes/Second'
+ - 'Megabytes/Second'
+ - 'Gigabytes/Second'
+ - 'Terabytes/Second'
+ - 'Bits/Second'
+ - 'Kilobits/Second'
+ - 'Megabits/Second'
+ - 'Gigabits/Second'
+ - 'Terabits/Second'
+ - 'Count/Second'
+ - 'None'
+ description:
+ description:
+ - A longer description of the alarm.
+ required: false
+ type: str
+ dimensions:
+ description:
+ - A dictionary describing which metric the alarm is applied to.
+ - 'For more information see the AWS documentation:'
+ - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension)
+ required: false
+ type: dict
+ alarm_actions:
+ description:
+      - A list of the names of action(s) taken when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s).
+ required: false
+ type: list
+ elements: str
+ insufficient_data_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status.
+ required: false
+ type: list
+ elements: str
+ ok_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s).
+ required: false
+ type: list
+ elements: str
+ treat_missing_data:
+ description:
+ - Sets how the alarm handles missing data points.
+ required: false
+ type: str
+ choices:
+ - 'breaching'
+ - 'notBreaching'
+ - 'ignore'
+ - 'missing'
+ default: 'missing'
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+ - name: create alarm
+ community.aws.ec2_metric_alarm:
+ state: present
+ region: ap-southeast-2
+ name: "cpu-low"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ statistic: Average
+ comparison: "LessThanOrEqualToThreshold"
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: "Percent"
+ description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
+ dimensions: {'InstanceId':'i-XXX'}
+ alarm_actions: ["action1","action2"]
+
+ - name: Create an alarm to recover a failed instance
+ community.aws.ec2_metric_alarm:
+ state: present
+ region: us-west-1
+ name: "recover-instance"
+ metric: "StatusCheckFailed_System"
+ namespace: "AWS/EC2"
+ statistic: "Minimum"
+ comparison: ">="
+ threshold: 1.0
+ period: 60
+ evaluation_periods: 2
+ unit: "Count"
+ description: "This will recover an instance when it fails"
+ dimensions: {"InstanceId":'i-XXX'}
+ alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+
+def create_metric_alarm(connection, module):
+
+ name = module.params.get('name')
+ metric = module.params.get('metric')
+ namespace = module.params.get('namespace')
+ statistic = module.params.get('statistic')
+ comparison = module.params.get('comparison')
+ threshold = module.params.get('threshold')
+ period = module.params.get('period')
+ evaluation_periods = module.params.get('evaluation_periods')
+ unit = module.params.get('unit')
+ description = module.params.get('description')
+ dimensions = module.params.get('dimensions')
+ alarm_actions = module.params.get('alarm_actions')
+ insufficient_data_actions = module.params.get('insufficient_data_actions')
+ ok_actions = module.params.get('ok_actions')
+ treat_missing_data = module.params.get('treat_missing_data')
+
+ warnings = []
+
+ alarms = connection.describe_alarms(AlarmNames=[name])
+
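+    # Map the deprecated symbolic operators onto the ComparisonOperator names CloudWatch expects.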
+ comparisons = {'<=': 'LessThanOrEqualToThreshold',
+ '<': 'LessThanThreshold',
+ '>=': 'GreaterThanOrEqualToThreshold',
+ '>': 'GreaterThanThreshold'}
+ if comparison in ('<=', '<', '>', '>='):
+ module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated. Please use LessThanOrEqualToThreshold, '
+ 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.',
+ date='2022-06-01', collection_name='community.aws')
+ comparison = comparisons[comparison]
+
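+    # CloudWatch expects dimensions as a list of {'Name': ..., 'Value': ...} dicts; convert the module's mapping form.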
+ if not isinstance(dimensions, list):
+ fixed_dimensions = []
+ for key, value in dimensions.items():
+ fixed_dimensions.append({'Name': key, 'Value': value})
+ dimensions = fixed_dimensions
+
+ if not alarms['MetricAlarms']:
+ try:
+ connection.put_metric_alarm(AlarmName=name,
+ MetricName=metric,
+ Namespace=namespace,
+ Statistic=statistic,
+ ComparisonOperator=comparison,
+ Threshold=threshold,
+ Period=period,
+ EvaluationPeriods=evaluation_periods,
+ Unit=unit,
+ AlarmDescription=description,
+ Dimensions=dimensions,
+ AlarmActions=alarm_actions,
+ InsufficientDataActions=insufficient_data_actions,
+ OKActions=ok_actions,
+ TreatMissingData=treat_missing_data)
+ changed = True
+ alarms = connection.describe_alarms(AlarmNames=[name])
+ except ClientError as e:
+ module.fail_json_aws(e)
+
+ else:
+ changed = False
+ alarm = alarms['MetricAlarms'][0]
+
+ # Workaround for alarms created before TreatMissingData was introduced
+ if 'TreatMissingData' not in alarm.keys():
+ alarm['TreatMissingData'] = 'missing'
+
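+        # Compare each desired attribute against the existing alarm, flag any difference as a change,
+        # and overlay the desired value so the update call below sends the complete definition.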
+ for key, value in {'MetricName': metric,
+ 'Namespace': namespace,
+ 'Statistic': statistic,
+ 'ComparisonOperator': comparison,
+ 'Threshold': threshold,
+ 'Period': period,
+ 'EvaluationPeriods': evaluation_periods,
+ 'Unit': unit,
+ 'AlarmDescription': description,
+ 'Dimensions': dimensions,
+ 'TreatMissingData': treat_missing_data}.items():
+ try:
+ if alarm[key] != value:
+ changed = True
+ except KeyError:
+ if value is not None:
+ changed = True
+
+ alarm[key] = value
+
+ for key, value in {'AlarmActions': alarm_actions,
+ 'InsufficientDataActions': insufficient_data_actions,
+ 'OKActions': ok_actions}.items():
+ action = value or []
+ if alarm[key] != action:
+ changed = True
+ alarm[key] = value
+
+ try:
+ if changed:
+ connection.put_metric_alarm(AlarmName=alarm['AlarmName'],
+ MetricName=alarm['MetricName'],
+ Namespace=alarm['Namespace'],
+ Statistic=alarm['Statistic'],
+ ComparisonOperator=alarm['ComparisonOperator'],
+ Threshold=alarm['Threshold'],
+ Period=alarm['Period'],
+ EvaluationPeriods=alarm['EvaluationPeriods'],
+ Unit=alarm['Unit'],
+ AlarmDescription=alarm['AlarmDescription'],
+ Dimensions=alarm['Dimensions'],
+ AlarmActions=alarm['AlarmActions'],
+ InsufficientDataActions=alarm['InsufficientDataActions'],
+ OKActions=alarm['OKActions'],
+ TreatMissingData=alarm['TreatMissingData'])
+ except ClientError as e:
+ module.fail_json_aws(e)
+
+ result = alarms['MetricAlarms'][0]
+ module.exit_json(changed=changed, warnings=warnings,
+ name=result['AlarmName'],
+ actions_enabled=result['ActionsEnabled'],
+ alarm_actions=result['AlarmActions'],
+ alarm_arn=result['AlarmArn'],
+ comparison=result['ComparisonOperator'],
+ description=result['AlarmDescription'],
+ dimensions=result['Dimensions'],
+ evaluation_periods=result['EvaluationPeriods'],
+ insufficient_data_actions=result['InsufficientDataActions'],
+ last_updated=result['AlarmConfigurationUpdatedTimestamp'],
+ metric=result['MetricName'],
+ namespace=result['Namespace'],
+ ok_actions=result['OKActions'],
+ period=result['Period'],
+ state_reason=result['StateReason'],
+ state_value=result['StateValue'],
+ statistic=result['Statistic'],
+ threshold=result['Threshold'],
+ treat_missing_data=result['TreatMissingData'],
+ unit=result['Unit'])
+
+
+def delete_metric_alarm(connection, module):
+ name = module.params.get('name')
+ alarms = connection.describe_alarms(AlarmNames=[name])
+
+ if alarms['MetricAlarms']:
+ try:
+ connection.delete_alarms(AlarmNames=[name])
+ module.exit_json(changed=True)
+        except ClientError as e:
+ module.fail_json_aws(e)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ metric=dict(type='str'),
+ namespace=dict(type='str'),
+ statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
+ comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold',
+ 'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']),
+ threshold=dict(type='float'),
+ period=dict(type='int'),
+ unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
+ 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
+ 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
+ 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
+ 'Terabits/Second', 'Count/Second', 'None']),
+ evaluation_periods=dict(type='int'),
+ description=dict(type='str'),
+ dimensions=dict(type='dict', default={}),
+ alarm_actions=dict(type='list', default=[], elements='str'),
+ insufficient_data_actions=dict(type='list', default=[], elements='str'),
+ ok_actions=dict(type='list', default=[], elements='str'),
+ treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ state = module.params.get('state')
+
+ connection = module.client('cloudwatch')
+
+ if state == 'present':
+ create_metric_alarm(connection, module)
+ elif state == 'absent':
+ delete_metric_alarm(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
new file mode 100644
index 00000000..7d9a8004
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_placement_group
+version_added: 1.0.0
+short_description: Create or delete an EC2 Placement Group
+description:
+  - Create an EC2 Placement Group; if a placement group with the same name
+    already exists, nothing is done. Likewise, deleting a placement group
+    that is already absent does nothing. See also
+    U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
+author: "Brad Macpherson (@iiibrad)"
+options:
+ name:
+ description:
+ - The name for the placement group.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete placement group.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ strategy:
+ description:
+      - Placement group strategy. C(cluster) packs instances into a
+        low-latency group in a single Availability Zone, while C(spread)
+        spreads instances across distinct underlying hardware.
+ default: cluster
+ choices: [ 'cluster', 'spread' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide
+# for details.
+
+- name: Create a placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+
+- name: Create a Spread placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+ strategy: spread
+
+- name: Delete a placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: absent
+
+'''
+
+
+RETURN = '''
+placement_group:
+ description: Placement group attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ name:
+ description: PG name
+ type: str
+ sample: my-cluster
+ state:
+ description: PG state
+ type: str
+ sample: "available"
+ strategy:
+ description: PG strategy
+ type: str
+ sample: "cluster"
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
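+# The EC2 calls below are retried with exponential backoff to ride out transient API throttling.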
+@AWSRetry.exponential_backoff()
+def get_placement_group_details(connection, module):
+ name = module.params.get("name")
+ try:
+ response = connection.describe_placement_groups(
+ Filters=[{
+ "Name": "group-name",
+ "Values": [name]
+ }])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't find placement group named [%s]" % name)
+
+ if len(response['PlacementGroups']) != 1:
+ return None
+ else:
+ placement_group = response['PlacementGroups'][0]
+ return {
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ }
+
+
+@AWSRetry.exponential_backoff()
+def create_placement_group(connection, module):
+ name = module.params.get("name")
+ strategy = module.params.get("strategy")
+
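+    # DryRun is set in check mode; AWS reports a successful dry run as a DryRunOperation error, translated into changed=True below.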
+ try:
+ connection.create_placement_group(
+ GroupName=name, Strategy=strategy, DryRun=module.check_mode)
+ except (BotoCoreError, ClientError) as e:
+ if e.response['Error']['Code'] == "DryRunOperation":
+ module.exit_json(changed=True, placement_group={
+ "name": name,
+ "state": 'DryRun',
+ "strategy": strategy,
+ })
+ module.fail_json_aws(
+ e,
+ msg="Couldn't create placement group [%s]" % name)
+
+ module.exit_json(changed=True,
+ placement_group=get_placement_group_details(
+ connection, module
+ ))
+
+
+@AWSRetry.exponential_backoff()
+def delete_placement_group(connection, module):
+ name = module.params.get("name")
+
+ try:
+ connection.delete_placement_group(
+ GroupName=name, DryRun=module.check_mode)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't delete placement group [%s]" % name)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ strategy=dict(default='cluster', choices=['cluster', 'spread'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('ec2')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ placement_group = get_placement_group_details(connection, module)
+ if placement_group is None:
+ create_placement_group(connection, module)
+ else:
+ strategy = module.params.get("strategy")
+ if placement_group['strategy'] == strategy:
+ module.exit_json(
+ changed=False, placement_group=placement_group)
+ else:
+ name = module.params.get("name")
+ module.fail_json(
+ msg=("Placement group '{}' exists, can't change strategy" +
+ " from '{}' to '{}'").format(
+ name,
+ placement_group['strategy'],
+ strategy))
+
+ elif state == 'absent':
+ placement_group = get_placement_group_details(connection, module)
+ if placement_group is None:
+ module.exit_json(changed=False)
+ else:
+ delete_placement_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_facts.py
new file mode 100644
index 00000000..354d3eb3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_facts.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_placement_group_info
+version_added: 1.0.0
+short_description: List EC2 Placement Group(s) details
+description:
+ - List details of EC2 Placement Group(s).
+ - This module was called C(ec2_placement_group_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Macpherson (@iiibrad)"
+options:
+ names:
+ description:
+ - A list of names to filter on. If a listed group does not exist, there
+ will be no corresponding entry in the result; no error will be raised.
+ type: list
+ elements: str
+ required: false
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all placement groups.
+ community.aws.ec2_placement_group_info:
+ register: all_ec2_placement_groups
+
+- name: List two placement groups.
+ community.aws.ec2_placement_group_info:
+ names:
+ - my-cluster
+ - my-other-cluster
+ register: specific_ec2_placement_groups
+
+- ansible.builtin.debug:
+ msg: "{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}"
+
+'''
+
+
+RETURN = r'''
+placement_groups:
+ description: Placement group attributes
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: PG name
+ type: str
+ sample: my-cluster
+ state:
+ description: PG state
+ type: str
+ sample: "available"
+ strategy:
+ description: PG strategy
+ type: str
+ sample: "cluster"
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_placement_groups_details(connection, module):
+ names = module.params.get("names")
+ try:
+ if len(names) > 0:
+ response = connection.describe_placement_groups(
+ Filters=[{
+ "Name": "group-name",
+ "Values": names
+ }])
+ else:
+ response = connection.describe_placement_groups()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't find placement groups named [%s]" % names)
+
+ results = []
+ for placement_group in response['PlacementGroups']:
+ results.append({
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ })
+ return results
+
+
+def main():
+ argument_spec = dict(
+ names=dict(type='list', default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._module._name == 'ec2_placement_group_facts':
+ module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ placement_groups = get_placement_groups_details(connection, module)
+ module.exit_json(changed=False, placement_groups=placement_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
new file mode 100644
index 00000000..354d3eb3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_placement_group_info
+version_added: 1.0.0
+short_description: List EC2 Placement Group(s) details
+description:
+ - List details of EC2 Placement Group(s).
+ - This module was called C(ec2_placement_group_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Macpherson (@iiibrad)"
+options:
+ names:
+ description:
+ - A list of names to filter on. If a listed group does not exist, there
+ will be no corresponding entry in the result; no error will be raised.
+ type: list
+ elements: str
+ required: false
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all placement groups.
+ community.aws.ec2_placement_group_info:
+ register: all_ec2_placement_groups
+
+- name: List two placement groups.
+ community.aws.ec2_placement_group_info:
+ names:
+ - my-cluster
+ - my-other-cluster
+ register: specific_ec2_placement_groups
+
+- ansible.builtin.debug:
+ msg: "{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}"
+
+'''
+
+
+RETURN = r'''
+placement_groups:
+ description: Placement group attributes
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: PG name
+ type: str
+ sample: my-cluster
+ state:
+ description: PG state
+ type: str
+ sample: "available"
+ strategy:
+ description: PG strategy
+ type: str
+ sample: "cluster"
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_placement_groups_details(connection, module):
+ names = module.params.get("names")
+ try:
+ if len(names) > 0:
+ response = connection.describe_placement_groups(
+ Filters=[{
+ "Name": "group-name",
+ "Values": names
+ }])
+ else:
+ response = connection.describe_placement_groups()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't find placement groups named [%s]" % names)
+
+ results = []
+ for placement_group in response['PlacementGroups']:
+ results.append({
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ })
+ return results
+
+
+def main():
+ argument_spec = dict(
+ names=dict(type='list', default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._module._name == 'ec2_placement_group_facts':
+ module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ placement_groups = get_placement_groups_details(connection, module)
+ module.exit_json(changed=False, placement_groups=placement_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_scaling_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_scaling_policy.py
new file mode 100644
index 00000000..656519b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_scaling_policy.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_scaling_policy
+short_description: Create or delete AWS scaling policies for Autoscaling groups
+version_added: 1.0.0
+description:
+ - Can create or delete scaling policies for autoscaling groups.
+ - Referenced autoscaling groups must already exist.
+author:
+ - Zacharie Eakin (@zeekin)
+ - Will Thames (@willthames)
+options:
+ state:
+ type: str
+ description:
+ - Register or deregister the policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Unique name for the scaling policy.
+ required: true
+ asg_name:
+ type: str
+ description:
+ - Name of the associated autoscaling group.
+ - Required if I(state) is C(present).
+ adjustment_type:
+ type: str
+ description:
+ - The type of change in capacity of the autoscaling group.
+ - Required if I(state) is C(present).
+ choices:
+ - ChangeInCapacity
+ - ExactCapacity
+ - PercentChangeInCapacity
+ scaling_adjustment:
+ type: int
+ description:
+ - The amount by which the autoscaling group is adjusted by the policy.
+ - A negative number has the effect of scaling down the ASG.
+ - Units are numbers of instances for C(ExactCapacity) or C(ChangeInCapacity) or percent
+ of existing instances for C(PercentChangeInCapacity).
+ - Required when I(policy_type) is C(SimpleScaling).
+ min_adjustment_step:
+ type: int
+ description:
+ - Minimum amount of adjustment when policy is triggered.
+ - Only used when I(adjustment_type) is C(PercentChangeInCapacity).
+ cooldown:
+ type: int
+ description:
+ - The minimum period of time (in seconds) between which autoscaling actions can take place.
+ - Only used when I(policy_type) is C(SimpleScaling).
+ policy_type:
+ type: str
+ description:
+ - Auto scaling adjustment policy.
+ choices:
+ - StepScaling
+ - SimpleScaling
+ default: SimpleScaling
+ metric_aggregation:
+ type: str
+ description:
+ - The aggregation type for the CloudWatch metrics.
+ - Only used when I(policy_type) is not C(SimpleScaling).
+ choices:
+ - Minimum
+ - Maximum
+ - Average
+ default: Average
+ step_adjustments:
+ type: list
+ description:
+      - List of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment).
+ - Intervals must not overlap or have a gap between them.
+ - At most, one item can have an undefined I(lower_bound).
+ If any item has a negative lower_bound, then there must be a step adjustment with an undefined I(lower_bound).
+ - At most, one item can have an undefined I(upper_bound).
+ If any item has a positive upper_bound, then there must be a step adjustment with an undefined I(upper_bound).
+ - The bounds are the amount over the alarm threshold at which the adjustment will trigger.
+ This means that for an alarm threshold of 50, triggering at 75 requires a lower bound of 25.
+ See U(http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_StepAdjustment.html).
+ elements: dict
+ suboptions:
+ lower_bound:
+ type: int
+ description:
+ - The lower bound for the difference between the alarm threshold and
+ the CloudWatch metric.
+ upper_bound:
+ type: int
+ description:
+ - The upper bound for the difference between the alarm threshold and
+ the CloudWatch metric.
+ scaling_adjustment:
+ type: int
+ description:
+ - The amount by which to scale.
+ required: true
+ estimated_instance_warmup:
+ type: int
+ description:
+ - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+EXAMPLES = '''
+- name: Simple Scale Down policy
+ community.aws.ec2_scaling_policy:
+ state: present
+ region: US-XXX
+ name: "scaledown-policy"
+ adjustment_type: "ChangeInCapacity"
+ asg_name: "application-asg"
+ scaling_adjustment: -1
+ min_adjustment_step: 1
+ cooldown: 300
+
+# For an alarm with a breach threshold of 20, the
+# following creates a stepped policy:
+# From 20-40 (0-20 above threshold), increase by 50% of existing capacity
+# From 40-infinity (20+ above threshold), increase by 100% of existing capacity
+- community.aws.ec2_scaling_policy:
+ state: present
+ region: US-XXX
+ name: "step-scale-up-policy"
+ policy_type: StepScaling
+ metric_aggregation: Maximum
+ step_adjustments:
+ - upper_bound: 20
+ scaling_adjustment: 50
+ - lower_bound: 20
+ scaling_adjustment: 100
+ adjustment_type: "PercentChangeInCapacity"
+ asg_name: "application-asg"
+'''
+
+RETURN = '''
+adjustment_type:
+ description: Scaling policy adjustment type
+ returned: always
+ type: str
+ sample: PercentChangeInCapacity
+alarms:
+ description: Cloudwatch alarms related to the policy
+ returned: always
+ type: complex
+ contains:
+ alarm_name:
+ description: name of the Cloudwatch alarm
+ returned: always
+ type: str
+ sample: cpu-very-high
+ alarm_arn:
+ description: ARN of the Cloudwatch alarm
+ returned: always
+ type: str
+ sample: arn:aws:cloudwatch:us-east-2:1234567890:alarm:cpu-very-high
+arn:
+ description: ARN of the scaling policy. Provided for backward compatibility, value is the same as I(policy_arn)
+ returned: always
+ type: str
+ sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy
+as_name:
+ description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name)
+ returned: always
+ type: str
+ sample: app-asg
+auto_scaling_group_name:
+ description: Name of Auto Scaling Group
+ returned: always
+ type: str
+ sample: app-asg
+metric_aggregation_type:
+ description: Method used to aggregate metrics
+ returned: when I(policy_type) is C(StepScaling)
+ type: str
+ sample: Maximum
+name:
+ description: Name of the scaling policy. Provided for backward compatibility, value is the same as I(policy_name)
+ returned: always
+ type: str
+ sample: app-policy
+policy_arn:
+ description: ARN of scaling policy.
+ returned: always
+ type: str
+ sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy
+policy_name:
+ description: Name of scaling policy
+ returned: always
+ type: str
+ sample: app-policy
+policy_type:
+ description: Type of auto scaling policy
+ returned: always
+ type: str
+ sample: StepScaling
+scaling_adjustment:
+ description: Adjustment to make when alarm is triggered
+  returned: when I(policy_type) is C(SimpleScaling)
+ type: int
+ sample: 1
+step_adjustments:
+ description: List of step adjustments
+ returned: always
+ type: complex
+ contains:
+ metric_interval_lower_bound:
+ description: Lower bound for metric interval
+ returned: if step has a lower bound
+ type: float
+ sample: 20.0
+ metric_interval_upper_bound:
+ description: Upper bound for metric interval
+ returned: if step has an upper bound
+ type: float
+ sample: 40.0
+ scaling_adjustment:
+ description: Adjustment to make if this step is reached
+ returned: always
+ type: int
+ sample: 50
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def create_scaling_policy(connection, module):
+ changed = False
+ asg_name = module.params['asg_name']
+ policy_type = module.params['policy_type']
+ policy_name = module.params['name']
+
+ params = dict(PolicyName=policy_name,
+ PolicyType=policy_type,
+ AutoScalingGroupName=asg_name,
+ AdjustmentType=module.params['adjustment_type'])
+
+ # min_adjustment_step attribute is only relevant if the adjustment_type
+ # is set to percentage change in capacity, so it is a special case
+ if module.params['adjustment_type'] == 'PercentChangeInCapacity':
+ if module.params['min_adjustment_step']:
+ params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step']
+
+ if policy_type == 'SimpleScaling':
+ # can't use required_if because it doesn't allow multiple criteria -
+ # it's only required if policy is SimpleScaling and state is present
+ if not module.params['scaling_adjustment']:
+ module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling '
+ 'and state is present')
+ params['ScalingAdjustment'] = module.params['scaling_adjustment']
+ if module.params['cooldown']:
+ params['Cooldown'] = module.params['cooldown']
+
+ if policy_type == 'StepScaling':
+ if not module.params['step_adjustments']:
+ module.fail_json(msg='step_adjustments is required when policy_type is StepScaling '
+ 'and state is present')
+ params['StepAdjustments'] = []
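+        # Per the StepAdjustment API reference linked in the documentation above,
+        # omitting MetricIntervalLowerBound means negative infinity and omitting
+        # MetricIntervalUpperBound means positive infinity.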
+ for step_adjustment in module.params['step_adjustments']:
+ step_adjust_params = dict(ScalingAdjustment=step_adjustment['scaling_adjustment'])
+            # Use explicit None checks so that a bound of 0 (a valid value) is kept
+            if step_adjustment.get('lower_bound') is not None:
+                step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound']
+            if step_adjustment.get('upper_bound') is not None:
+                step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound']
+ params['StepAdjustments'].append(step_adjust_params)
+ if module.params['metric_aggregation']:
+ params['MetricAggregationType'] = module.params['metric_aggregation']
+ if module.params['estimated_instance_warmup']:
+ params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup']
+
+ try:
+ policies = connection.describe_policies(aws_retry=True,
+ AutoScalingGroupName=asg_name,
+ PolicyNames=[policy_name])['ScalingPolicies']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+    # Use two separate dicts; 'before = after = {}' would alias one object and
+    # make the before/after diff identical.
+    before = {}
+    after = {}
+ if not policies:
+ changed = True
+ else:
+ policy = policies[0]
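+        # Field-by-field drift detection: any requested parameter that differs
+        # from the existing policy marks the task changed and feeds the diff output.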
+ for key in params:
+ if params[key] != policy.get(key):
+ changed = True
+ before[key] = params[key]
+ after[key] = policy.get(key)
+
+ if changed:
+ try:
+ connection.put_scaling_policy(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create autoscaling policy")
+ try:
+ policies = connection.describe_policies(aws_retry=True,
+ AutoScalingGroupName=asg_name,
+ PolicyNames=[policy_name])['ScalingPolicies']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+ policy = camel_dict_to_snake_dict(policies[0])
+ # Backward compatible return values
+ policy['arn'] = policy['policy_arn']
+ policy['as_name'] = policy['auto_scaling_group_name']
+ policy['name'] = policy['policy_name']
+
+ if before and after:
+ module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy)
+ else:
+ module.exit_json(changed=changed, **policy)
+
+
+def delete_scaling_policy(connection, module):
+ policy_name = module.params.get('name')
+
+ try:
+ policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+ if policy['ScalingPolicies']:
+ try:
+ connection.delete_policy(aws_retry=True,
+ AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'],
+ PolicyName=policy_name)
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete autoscaling policy")
+
+ module.exit_json(changed=False)
+
+
+def main():
+ step_adjustment_spec = dict(
+ lower_bound=dict(type='int'),
+ upper_bound=dict(type='int'),
+ scaling_adjustment=dict(type='int', required=True))
+
+ argument_spec = dict(
+ name=dict(required=True),
+ adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
+ asg_name=dict(),
+ scaling_adjustment=dict(type='int'),
+ min_adjustment_step=dict(type='int'),
+ cooldown=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ metric_aggregation=dict(default='Average', choices=['Minimum', 'Maximum', 'Average']),
+ policy_type=dict(default='SimpleScaling', choices=['SimpleScaling', 'StepScaling']),
+ step_adjustments=dict(type='list', options=step_adjustment_spec, elements='dict'),
+ estimated_instance_warmup=dict(type='int')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['asg_name', 'adjustment_type']]])
+
+ connection = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get('state')
+ if state == 'present':
+ create_scaling_policy(connection, module)
+ elif state == 'absent':
+ delete_scaling_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
new file mode 100644
index 00000000..85f44d60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_copy
+version_added: 1.0.0
+short_description: Copies an EC2 snapshot and returns the new Snapshot ID
+description:
+ - Copies an EC2 Snapshot from a source region to a destination region.
+options:
+ source_region:
+ description:
+ - The source region the Snapshot should be copied from.
+ required: true
+ type: str
+ source_snapshot_id:
+ description:
+ - The ID of the Snapshot in source region that should be copied.
+ required: true
+ type: str
+ description:
+ description:
+ - An optional human-readable string describing purpose of the new Snapshot.
+ type: str
+ encrypted:
+ description:
+ - Whether or not the destination Snapshot should be encrypted.
+ type: bool
+ default: 'no'
+ kms_key_id:
+ description:
+ - KMS key id used to encrypt snapshot. If not specified, AWS defaults to C(alias/aws/ebs).
+ type: str
+ wait:
+ description:
+ - Wait for the copied Snapshot to be in 'Available' state before returning.
+ type: bool
+ default: 'no'
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 600
+ type: int
+ tags:
+ description:
+      - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key1":"value1","key2":"value2"}'
+ type: dict
+author: Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com>
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+'''
+
+EXAMPLES = '''
+- name: Basic Snapshot Copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+
+- name: Copy Snapshot and wait until available
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ wait: yes
+ wait_timeout: 1200 # Default timeout is 600
+ register: snapshot_id
+
+- name: Tagged Snapshot copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ tags:
+ Name: Snapshot-Name
+
+- name: Encrypted Snapshot copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ encrypted: yes
+
+- name: Encrypted Snapshot copy with specified key
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ encrypted: yes
+ kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
+'''
+
+RETURN = '''
+snapshot_id:
+ description: snapshot id of the newly created snapshot
+ returned: when snapshot copy is successful
+ type: str
+ sample: "snap-e9095e8c"
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, WaiterError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def copy_snapshot(module, ec2):
+ """
+ Copies an EC2 Snapshot to another region
+
+ module : AnsibleAWSModule object
+ ec2: ec2 connection object
+ """
+
+ params = {
+ 'SourceRegion': module.params.get('source_region'),
+ 'SourceSnapshotId': module.params.get('source_snapshot_id'),
+ 'Description': module.params.get('description')
+ }
+
+ if module.params.get('encrypted'):
+ params['Encrypted'] = True
+
+ if module.params.get('kms_key_id'):
+ params['KmsKeyId'] = module.params.get('kms_key_id')
+
+ try:
+ snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
+ if module.params.get('wait'):
+ delay = 15
+            # Add one to max_attempts as wait() increments
+ # its counter before assessing it for time.sleep()
+ max_attempts = (module.params.get('wait_timeout') // delay) + 1
+ ec2.get_waiter('snapshot_completed').wait(
+ SnapshotIds=[snapshot_id],
+ WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
+ )
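+        # Tags are applied to the new snapshot in the destination region once
+        # the copy request has returned a snapshot ID.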
+ if module.params.get('tags'):
+ ec2.create_tags(
+ Resources=[snapshot_id],
+ Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
+ )
+
+ except WaiterError as we:
+ module.fail_json(msg='An error occurred waiting for the snapshot to become available. (%s)' % str(we), exception=traceback.format_exc())
+ except ClientError as ce:
+ module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response))
+
+ module.exit_json(changed=True, snapshot_id=snapshot_id)
+
+
+def main():
+ argument_spec = dict(
+ source_region=dict(required=True),
+ source_snapshot_id=dict(required=True),
+ description=dict(default=''),
+ encrypted=dict(type='bool', default=False, required=False),
+ kms_key_id=dict(type='str', required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ tags=dict(type='dict'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ copy_snapshot(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
new file mode 100644
index 00000000..a0595b4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway
+short_description: Create and delete AWS Transit Gateways
+version_added: 1.0.0
+description:
+ - Creates AWS Transit Gateways.
+ - Deletes AWS Transit Gateways.
+ - Updates tags on existing transit gateways.
+requirements: [ 'botocore', 'boto3' ]
+options:
+ asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+ - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ type: int
+ auto_associate:
+ description:
+ - Enable or disable automatic association with the default association route table.
+ default: true
+ type: bool
+ auto_attach:
+ description:
+ - Enable or disable automatic acceptance of attachment requests.
+ default: false
+ type: bool
+ auto_propagate:
+ description:
+ - Enable or disable automatic propagation of routes to the default propagation route table.
+ default: true
+ type: bool
+ description:
+ description:
+ - The description of the transit gateway.
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: true
+ type: bool
+ purge_tags:
+ description:
+ - Whether to purge existing tags not included with tags argument.
+ default: true
+ type: bool
+ state:
+ description:
+ - C(present) to ensure resource is created.
+ - C(absent) to remove resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ tags:
+ description:
+ - A dictionary of resource tags
+ type: dict
+ transit_gateway_id:
+ description:
+ - The ID of the transit gateway.
+ type: str
+ vpn_ecmp_support:
+ description:
+ - Enable or disable Equal Cost Multipath Protocol support.
+ default: true
+ type: bool
+ wait:
+ description:
+      - Whether to wait for the transit gateway to reach the requested state before returning.
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+      - Number of seconds to wait for the transit gateway to reach the requested state.
+ default: 300
+ type: int
+
+author: "Bob Boldin (@BobBoldin)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create a new transit gateway using defaults
+ community.aws.ec2_transit_gateway:
+ state: present
+ region: us-east-1
+ description: personal-testing
+ register: created_tgw
+
+- name: Create a new transit gateway with options
+ community.aws.ec2_transit_gateway:
+ asn: 64514
+ auto_associate: no
+ auto_propagate: no
+ dns_support: True
+ description: "nonprod transit gateway"
+ purge_tags: False
+ state: present
+ region: us-east-1
+ tags:
+ Name: nonprod transit gateway
+ status: testing
+
+- name: Remove a transit gateway by description
+ community.aws.ec2_transit_gateway:
+ state: absent
+ region: us-east-1
+ description: personal-testing
+
+- name: Remove a transit gateway by id
+ community.aws.ec2_transit_gateway:
+ state: absent
+ region: ap-southeast-2
+ transit_gateway_id: tgw-3a9aa123
+ register: deleted_tgw
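+
+# A minimal sketch (the ID and tag values are hypothetical): tags on an
+# existing transit gateway can be updated in place; purge_tags: false
+# preserves tags that are not listed.
+- name: Update tags on an existing transit gateway
+  community.aws.ec2_transit_gateway:
+    state: present
+    region: us-east-1
+    transit_gateway_id: tgw-3a9aa123
+    purge_tags: false
+    tags:
+      status: production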
+'''
+
+RETURN = '''
+transit_gateway:
+ description: The attributes of the transit gateway.
+ type: complex
+ returned: I(state=present)
+ contains:
+ creation_time:
+ description: The creation time of the transit gateway.
+ returned: always
+ type: str
+ sample: "2019-03-06T17:13:51+00:00"
+ description:
+ description: The description of the transit gateway.
+ returned: always
+ type: str
+ sample: my test tgw
+ options:
+ description: The options attributes of the transit gateway
+ returned: always
+ type: complex
+ contains:
+ amazon_side_asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+ The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ returned: always
+ type: str
+ sample: 64512
+                auto_accept_shared_attachments:
+ description: Indicates whether attachment requests are automatically accepted.
+ returned: always
+ type: str
+ sample: disable
+ default_route_table_association:
+ description:
+ - Indicates whether resource attachments are automatically
+ associated with the default association route table.
+ returned: always
+ type: str
+ sample: enable
+ association_default_route_table_id:
+ description: The ID of the default association route table.
+                    returned: when exists
+ type: str
+ sample: tgw-rtb-abc123444
+ default_route_table_propagation:
+ description:
+ - Indicates whether resource attachments automatically
+ propagate routes to the default propagation route table.
+ returned: always
+ type: str
+ sample: disable
+ propagation_default_route_table_id:
+ description: The ID of the default propagation route table.
+ returned: when exists
+ type: str
+ sample: tgw-rtb-def456777
+ vpn_ecmp_support:
+ description: Indicates whether Equal Cost Multipath Protocol support is enabled.
+ returned: always
+ type: str
+ sample: enable
+ dns_support:
+ description: Indicates whether DNS support is enabled.
+ returned: always
+ type: str
+ sample: enable
+ owner_id:
+ description: The account that owns the transit gateway.
+ returned: always
+ type: str
+ sample: '123456789012'
+ state:
+ description: The state of the transit gateway.
+ returned: always
+ type: str
+ sample: pending
+ tags:
+ description: A dictionary of resource tags
+ returned: always
+ type: dict
+ sample:
+ tags:
+ Name: nonprod_tgw
+        transit_gateway_arn:
+            description: The Amazon Resource Name (ARN) of the transit gateway.
+            returned: always
+            type: str
+            sample: arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-3a9aa123
+ transit_gateway_id:
+            description: The ID of the transit gateway.
+ returned: always
+ type: str
+ sample: tgw-3a9aa123
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from time import sleep, time
+from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ ansible_dict_to_boto3_tag_list,
+ ansible_dict_to_boto3_filter_list,
+ AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ compare_aws_tags
+)
+
+
+class AnsibleEc2Tgw(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client('ec2')
+ self._check_mode = self._module.check_mode
+
+ if not hasattr(self._connection, 'describe_transit_gateways'):
+ self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52')
+
+ def process(self):
+ """ Process the request based on state parameter .
+ state = present will search for an existing tgw based and return the object data.
+ if no object is found it will be created
+
+ state = absent will attempt to remove the tgw however will fail if it still has
+ attachments or associations
+ """
+ description = self._module.params.get('description')
+ state = self._module.params.get('state', 'present')
+ tgw_id = self._module.params.get('transit_gateway_id')
+
+ if state == 'present':
+ self.ensure_tgw_present(tgw_id, description)
+ elif state == 'absent':
+ self.ensure_tgw_absent(tgw_id, description)
+
+ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True):
+ """
+ Wait for the Transit Gateway to reach the specified status
+ :param wait_timeout: Number of seconds to wait, until this timeout is reached.
+        :param tgw_id: The ID of the transit gateway.
+ :param status: The status to wait for.
+ examples. status=available, status=deleted
+ :param skip_deleted: ignore deleted transit gateways
+ :return dict: transit gateway object
+ """
+ polling_increment_secs = 5
+ wait_timeout = time() + wait_timeout
+ status_achieved = False
+ transit_gateway = dict()
+
+ while wait_timeout > time():
+ try:
+ transit_gateway = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=skip_deleted)
+
+ if transit_gateway:
+ if self._check_mode:
+ transit_gateway['state'] = status
+
+ if transit_gateway.get('state') == status:
+ status_achieved = True
+ break
+
+ elif transit_gateway.get('state') == 'failed':
+ break
+
+ else:
+ sleep(polling_increment_secs)
+
+ except ClientError as e:
+ self._module.fail_json_aws(e)
+
+ if not status_achieved:
+ self._module.fail_json(
+ msg="Wait time out reached, while waiting for results")
+
+ return transit_gateway
+
+ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True):
+ """ search for an existing tgw by either tgw_id or description
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+ :param skip_deleted: ignore deleted transit gateways
+ :return dict: transit gateway object
+ """
+ filters = []
+ if tgw_id:
+ filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id})
+
+ try:
+ response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ tgw = None
+ tgws = []
+
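+        # A match can come from the explicit tgw_id filter or from comparing
+        # descriptions; more than one candidate for a description is an error.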
+ if len(response.get('TransitGateways', [])) == 1 and tgw_id:
+ if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted:
+ tgws.extend(response['TransitGateways'])
+
+ for gateway in response.get('TransitGateways', []):
+ if description == gateway['Description'] and gateway['State'] != 'deleted':
+ tgws.append(gateway)
+
+ if len(tgws) > 1:
+ self._module.fail_json(
+                msg='EC2 returned more than one transit gateway for description {0}, aborting'.format(description))
+ elif tgws:
+ tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags'])
+ tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags'])
+
+ return tgw
+
+ @staticmethod
+ def enable_option_flag(flag):
+ disabled = "disable"
+ enabled = "enable"
+ if flag:
+ return enabled
+ return disabled
+
+ def create_tgw(self, description):
+ """
+ Create a transit gateway and optionally wait for status to become available.
+
+ :param description: The description of the transit gateway.
+ :return dict: transit gateway object
+ """
+ options = dict()
+ wait = self._module.params.get('wait')
+ wait_timeout = self._module.params.get('wait_timeout')
+
+ if self._module.params.get('asn'):
+ options['AmazonSideAsn'] = self._module.params.get('asn')
+
+ options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach'))
+ options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate'))
+ options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate'))
+ options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support'))
+ options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support'))
+
+ try:
+ response = self._connection.create_transit_gateway(Description=description, Options=options)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ tgw_id = response['TransitGateway']['TransitGatewayId']
+
+ if wait:
+ result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available")
+ else:
+ result = self.get_matching_tgw(tgw_id=tgw_id)
+
+        self._results['msg'] = 'Transit gateway {0} created'.format(result['transit_gateway_id'])
+
+ return result
+
+ def delete_tgw(self, tgw_id):
+ """
+        Delete the transit gateway and optionally wait for the status to become deleted.
+
+ :param tgw_id: The id of the transit gateway
+ :return dict: transit gateway object
+ """
+ wait = self._module.params.get('wait')
+ wait_timeout = self._module.params.get('wait_timeout')
+
+ try:
+ response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ if wait:
+ result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False)
+ else:
+ result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False)
+
+        self._results['msg'] = 'Transit gateway {0} deleted'.format(tgw_id)
+
+ return result
+
+ def ensure_tags(self, tgw_id, tags, purge_tags):
+ """
+ Ensures tags are applied to the transit gateway. Optionally will remove any
+ existing tags not in the tags argument if purge_tags is set to true
+
+ :param tgw_id: The AWS id of the transit gateway
+        :param tags: dict of tags to apply to the transit gateway.
+        :param purge_tags: when true, existing tags not in the tags param are removed
+ :return: true if tags were updated
+ """
+ tags_changed = False
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id})
+ try:
+ cur_tags = self._connection.describe_tags(Filters=filters)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+ if to_update:
+ try:
+ if not self._check_mode:
+ AWSRetry.exponential_backoff()(self._connection.create_tags)(
+ Resources=[tgw_id],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+ self._results['changed'] = True
+ tags_changed = True
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't create tags {0} for resource {1}".format(
+ ansible_dict_to_boto3_tag_list(to_update), tgw_id))
+
+ if to_delete:
+ try:
+ if not self._check_mode:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ AWSRetry.exponential_backoff()(self._connection.delete_tags)(
+ Resources=[tgw_id],
+ Tags=tags_list
+ )
+ self._results['changed'] = True
+ tags_changed = True
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't delete tags {0} for resource {1}".format(
+ ansible_dict_to_boto3_tag_list(to_delete), tgw_id))
+
+ return tags_changed
+
+ def ensure_tgw_present(self, tgw_id=None, description=None):
+ """
+ Will create a tgw if no match to the tgw_id or description are found
+ Will update the tgw tags if matching one found but tags are not synced
+
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+ :return dict: transit gateway object
+ """
+ tgw = self.get_matching_tgw(tgw_id, description)
+
+ if tgw is None:
+ if self._check_mode:
+ self._results['changed'] = True
+ self._results['transit_gateway_id'] = None
+ return self._results
+
+ try:
+ if not description:
+ self._module.fail_json(msg="Failed to create Transit Gateway: description argument required")
+ tgw = self.create_tgw(description)
+ self._results['changed'] = True
+ except (BotoCoreError, ClientError) as e:
+ self._module.fail_json_aws(e, msg='Unable to create Transit Gateway')
+
+ if self._module.params.get('tags') != tgw.get('tags'):
+ stringed_tags_dict = dict((to_text(k), to_text(v)) for k, v in self._module.params.get('tags').items())
+ if self.ensure_tags(tgw['transit_gateway_id'], stringed_tags_dict, self._module.params.get('purge_tags')):
+ self._results['changed'] = True
+
+ self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'])
+
+ return self._results
+
+ def ensure_tgw_absent(self, tgw_id=None, description=None):
+ """
+ Will delete the tgw if a single tgw is found not yet in deleted status
+
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+        :return dict: transit gateway object
+ """
+ self._results['transit_gateway_id'] = None
+ tgw = self.get_matching_tgw(tgw_id, description)
+
+ if tgw is not None:
+ if self._check_mode:
+ self._results['changed'] = True
+ return self._results
+
+ try:
+ tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id'])
+ self._results['changed'] = True
+ self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'],
+ skip_deleted=False)
+ except (BotoCoreError, ClientError) as e:
+ self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway')
+
+ return self._results
+
+
+def setup_module_object():
+ """
+ merge argument spec and create Ansible module object
+ :return: Ansible module object
+ """
+
+    argument_spec = dict(
+        asn=dict(type='int'),
+        auto_associate=dict(type='bool', default=True),
+        auto_attach=dict(type='bool', default=False),
+        auto_propagate=dict(type='bool', default=True),
+        description=dict(type='str'),
+        dns_support=dict(type='bool', default=True),
+        purge_tags=dict(type='bool', default=True),
+        state=dict(default='present', choices=['present', 'absent']),
+        tags=dict(default=dict(), type='dict'),
+        transit_gateway_id=dict(type='str'),
+        vpn_ecmp_support=dict(type='bool', default=True),
+        wait=dict(type='bool', default=True),
+        wait_timeout=dict(type='int', default=300)
+    )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('description', 'transit_gateway_id')],
+ supports_check_mode=True,
+ )
+
+ return module
+
+
+def main():
+
+ module = setup_module_object()
+
+ results = dict(
+ changed=False
+ )
+
+ tgw_manager = AnsibleEc2Tgw(module=module, results=results)
+ tgw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
new file mode 100644
index 00000000..2eacf01c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_transit_gateway_info
+short_description: Gather information about EC2 transit gateways in AWS
+version_added: 1.0.0
+description:
+  - Gather information about EC2 transit gateways in AWS.
+author: "Bob Boldin (@BobBoldin)"
+requirements:
+ - botocore
+ - boto3
+options:
+ transit_gateway_ids:
+ description:
+ - A list of transit gateway IDs to gather information for.
+ aliases: [transit_gateway_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather info about all transit gateways
+ community.aws.ec2_transit_gateway_info:
+
+- name: Gather info about a particular transit gateway using filter transit gateway ID
+ community.aws.ec2_transit_gateway_info:
+ filters:
+ transit-gateway-id: tgw-02c42332e6b7da829
+
+- name: Gather info about a particular transit gateway using multiple option filters
+ community.aws.ec2_transit_gateway_info:
+ filters:
+ options.dns-support: enable
+ options.vpn-ecmp-support: enable
+
+- name: Gather info about multiple transit gateways using module param
+ community.aws.ec2_transit_gateway_info:
+ transit_gateway_ids:
+ - tgw-02c42332e6b7da829
+ - tgw-03c53443d5a8cb716
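+
+# A minimal sketch (the variable names are hypothetical): the returned list
+# can be inspected via register.
+- name: Gather and register transit gateway info
+  community.aws.ec2_transit_gateway_info:
+  register: tgw_info
+
+- name: Display the gathered transit gateways
+  ansible.builtin.debug:
+    var: tgw_info.transit_gateways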
+'''
+
+RETURN = r'''
+transit_gateways:
+ description: >
+ Transit gateways that match the provided filters. Each element consists of a dict with all the information
+ related to that transit gateway.
+ returned: on success
+ type: complex
+ contains:
+ creation_time:
+ description: The creation time.
+ returned: always
+ type: str
+ sample: "2019-02-05T16:19:58+00:00"
+ description:
+ description: The description of the transit gateway.
+ returned: always
+ type: str
+ sample: "A transit gateway"
+ options:
+ description: A dictionary of the transit gateway options.
+ returned: always
+ type: complex
+ contains:
+ amazon_side_asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon
+ side of a BGP session. The range is 64512 to 65534 for
+ 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ returned: always
+ type: int
+ sample: 64512
+ auto_accept_shared_attachments:
+ description:
+ - Indicates whether attachment requests are automatically accepted.
+ returned: always
+ type: str
+ sample: "enable"
+ default_route_table_association:
+ description:
+ - Indicates whether resource attachments are automatically
+ associated with the default association route table.
+ returned: always
+ type: str
+ sample: "disable"
+ association_default_route_table_id:
+ description:
+ - The ID of the default association route table.
+ returned: when present
+ type: str
+ sample: "rtb-11223344"
+ default_route_table_propagation:
+ description:
+ - Indicates whether resource attachments automatically
+ propagate routes to the default propagation route table.
+ returned: always
+ type: str
+ sample: "disable"
+ dns_support:
+ description:
+ - Indicates whether DNS support is enabled.
+ returned: always
+ type: str
+ sample: "enable"
+ propagation_default_route_table_id:
+ description:
+ - The ID of the default propagation route table.
+ returned: when present
+ type: str
+ sample: "rtb-11223344"
+ vpn_ecmp_support:
+ description:
+ - Indicates whether Equal Cost Multipath Protocol support
+ is enabled.
+ returned: always
+ type: str
+ sample: "enable"
+ owner_id:
+            description: The ID of the AWS account that owns the transit gateway.
+ returned: always
+ type: str
+ sample: "1234567654323"
+ state:
+ description: The state of the transit gateway.
+ returned: always
+ type: str
+ sample: "available"
+ tags:
+ description: A dict of tags associated with the transit gateway.
+ returned: always
+ type: dict
+ sample: '{
+ "Name": "A sample TGW"
+ }'
+ transit_gateway_arn:
+ description: The Amazon Resource Name (ARN) of the transit gateway.
+ returned: always
+ type: str
+ sample: "arn:aws:ec2:us-west-2:1234567654323:transit-gateway/tgw-02c42332e6b7da829"
+ transit_gateway_id:
+ description: The ID of the transit gateway.
+ returned: always
+ type: str
+ sample: "tgw-02c42332e6b7da829"
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ ansible_dict_to_boto3_filter_list
+)
+
+
+class AnsibleEc2TgwInfo(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client('ec2')
+ self._check_mode = self._module.check_mode
+
+ if not hasattr(self._connection, 'describe_transit_gateways'):
+ self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52')
+
+ @AWSRetry.exponential_backoff()
+ def describe_transit_gateways(self):
+ """
+ Describe transit gateways.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(self._module.params['filters'])
+ transit_gateway_ids = self._module.params['transit_gateway_ids']
+
+ # init empty list for return vars
+ transit_gateway_info = list()
+
+ # Get the basic transit gateway info
+ try:
+ response = self._connection.describe_transit_gateways(
+ TransitGatewayIds=transit_gateway_ids, Filters=filters)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidTransitGatewayID.NotFound':
+ self._results['transit_gateways'] = []
+ return
+ raise
+
+ for transit_gateway in response['TransitGateways']:
+ transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags']))
+ # convert tag list to ansible dict
+ transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', []))
+
+ self._results['transit_gateways'] = transit_gateway_info
+ return
+
+
+def setup_module_object():
+ """
+ merge argument spec and create Ansible module object
+ :return: Ansible module object
+ """
+
+ argument_spec = dict(
+ transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ return module
+
+
+def main():
+
+ module = setup_module_object()
+
+ results = dict(
+ changed=False
+ )
+
+ tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results)
+ try:
+ tgwf_manager.describe_transit_gateways()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
new file mode 100644
index 00000000..d462696d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_egress_igw
+version_added: 1.0.0
+short_description: Manage an AWS VPC Egress-Only Internet Gateway
+description:
+    - Manage an AWS VPC Egress-Only Internet Gateway.
+author: Daniel Shepherd (@shepdelacreme)
+options:
+ vpc_id:
+ description:
+      - The ID of the VPC that this Egress-Only Internet Gateway should be attached to.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete the EIGW.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Egress-Only Internet Gateway.
+# The gateway ID can be accessed via {{ eigw.gateway_id }} for use in subsequent tasks.
+- community.aws.ec2_vpc_egress_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ register: eigw
+
+'''
+
+RETURN = '''
+gateway_id:
+ description: The ID of the Egress Only Internet Gateway or Null.
+ returned: always
+ type: str
+ sample: eigw-0e00cf111ba5bc11e
+vpc_id:
+ description: The ID of the VPC to attach or detach gateway from.
+ returned: always
+ type: str
+ sample: vpc-012345678
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def delete_eigw(module, conn, eigw_id):
+ """
+ Delete EIGW.
+
+ module : AnsibleAWSModule object
+ conn : boto3 client connection object
+ eigw_id : ID of the EIGW to delete
+ """
+ changed = False
+
+ try:
+ response = conn.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id)
+ except botocore.exceptions.ClientError as e:
+ # When boto3 method is run with DryRun=True it returns an error on success
+ # We need to catch the error and return something valid
+ if e.response.get('Error', {}).get('Code') == "DryRunOperation":
+ changed = True
+ else:
+ module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id))
+
+ if not module.check_mode:
+ changed = response.get('ReturnCode', False)
+
+ return changed
+
+
+def create_eigw(module, conn, vpc_id):
+ """
+ Create EIGW.
+
+ module : AnsibleAWSModule object
+ conn : boto3 client connection object
+ vpc_id : ID of the VPC we are operating on
+ """
+ gateway_id = None
+ changed = False
+
+ try:
+ response = conn.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id)
+ except botocore.exceptions.ClientError as e:
+ # When boto3 method is run with DryRun=True it returns an error on success
+ # We need to catch the error and return something valid
+ if e.response.get('Error', {}).get('Code') == "DryRunOperation":
+ changed = True
+ elif e.response.get('Error', {}).get('Code') == "InvalidVpcID.NotFound":
+ module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id))
+ else:
+ module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
+
+ if not module.check_mode:
+ gateway = response.get('EgressOnlyInternetGateway', {})
+ state = gateway.get('Attachments', [{}])[0].get('State')
+ gateway_id = gateway.get('EgressOnlyInternetGatewayId')
+
+ if gateway_id and state in ('attached', 'attaching'):
+ changed = True
+ else:
+ # EIGW gave back a bad attachment state or an invalid response so we error out
+ module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id),
+ **camel_dict_to_snake_dict(response))
+
+ return changed, gateway_id
+
+
+def describe_eigws(module, conn, vpc_id):
+ """
+ Describe EIGWs.
+
+ module : AnsibleAWSModule object
+ conn : boto3 client connection object
+ vpc_id : ID of the VPC we are operating on
+ """
+ gateway_id = None
+
+ try:
+ response = conn.describe_egress_only_internet_gateways()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways")
+
+ for eigw in response.get('EgressOnlyInternetGateways', []):
+ for attachment in eigw.get('Attachments', []):
+ if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'):
+ gateway_id = eigw.get('EgressOnlyInternetGatewayId')
+
+ return gateway_id
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ vpc_id = module.params.get('vpc_id')
+ state = module.params.get('state')
+
+ eigw_id = describe_eigws(module, connection, vpc_id)
+
+ result = dict(gateway_id=eigw_id, vpc_id=vpc_id)
+ changed = False
+
+ if state == 'present' and not eigw_id:
+ changed, result['gateway_id'] = create_eigw(module, connection, vpc_id)
+ elif state == 'absent' and eigw_id:
+ changed = delete_eigw(module, connection, eigw_id)
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint.py
new file mode 100644
index 00000000..771ea52b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint
+short_description: Create and delete AWS VPC Endpoints
+version_added: 1.0.0
+description:
+ - Creates AWS VPC endpoints.
+ - Deletes AWS VPC endpoints.
+ - This module supports check mode.
+requirements: [ boto3 ]
+options:
+ vpc_id:
+ description:
+ - Required when creating a VPC endpoint.
+ required: false
+ type: str
+ service:
+ description:
+      - An AWS-supported VPC endpoint service. Use the M(community.aws.ec2_vpc_endpoint_info)
+ module to describe the supported endpoint services.
+ - Required when creating an endpoint.
+ required: false
+ type: str
+ policy:
+ description:
+      - A properly formatted JSON policy as a string, see
+ U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813).
+ Cannot be used with I(policy_file).
+ - Option when creating an endpoint. If not provided AWS will
+ utilise a default policy which provides full access to the service.
+ required: false
+ type: json
+ policy_file:
+ description:
+      - The path to a properly formatted JSON policy file, see
+ U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813)
+ on how to use it properly. Cannot be used with I(policy).
+ - Option when creating an endpoint. If not provided AWS will
+ utilise a default policy which provides full access to the service.
+ required: false
+ aliases: [ "policy_path" ]
+ type: path
+ state:
+ description:
+      - C(present) to ensure the resource is created.
+      - C(absent) to remove the resource.
+ required: false
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ wait:
+ description:
+      - When I(state=present), wait for the VPC endpoint to reach the available state.
+        Unfortunately this is ignored for delete actions due to a difference in
+        behaviour from AWS.
+ required: false
+ default: no
+ type: bool
+ wait_timeout:
+ description:
+ - Used in conjunction with wait. Number of seconds to wait for status.
+ Unfortunately this is ignored for delete actions due to a difference in
+ behaviour from AWS.
+ required: false
+ default: 320
+ type: int
+ route_table_ids:
+ description:
+ - List of one or more route table ids to attach to the endpoint. A route
+ is added to the route table with the destination of the endpoint if
+ provided.
+ required: false
+ type: list
+ elements: str
+ vpc_endpoint_id:
+ description:
+      - One or more VPC endpoint IDs to remove from the AWS account.
+ required: false
+ type: str
+ client_token:
+ description:
+      - Optional client token to ensure idempotency.
+ required: false
+ type: str
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new vpc endpoint with a json template for policy
+ community.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} "
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
+
+- name: Create new vpc endpoint with the default policy
+ community.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
+
+- name: Create new vpc endpoint with json file
+ community.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ policy_file: "{{ role_path }}/files/endpoint_policy.json"
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
+
+- name: Delete newly created vpc endpoint
+ community.aws.ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}"
+ region: ap-southeast-2
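+
+# A minimal sketch (IDs are placeholders): wait until the endpoint is
+# available; note that wait is ignored for state=absent.
+- name: Create new vpc endpoint and wait until available
+  community.aws.ec2_vpc_endpoint:
+    state: present
+    region: ap-southeast-2
+    vpc_id: vpc-12345678
+    service: com.amazonaws.ap-southeast-2.s3
+    wait: yes
+    wait_timeout: 600
+  register: new_vpc_endpoint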
+'''
+
+RETURN = r'''
+endpoints:
+ description: The resulting endpoints from the module call
+ returned: success
+ type: list
+ sample: [
+ {
+ "creation_timestamp": "2017-02-20T05:04:15+00:00",
+ "policy_document": {
+ "Id": "Policy1450910922815",
+ "Statement": [
+ {
+ "Action": "s3:*",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Resource": [
+ "arn:aws:s3:::*/*",
+ "arn:aws:s3:::*"
+ ],
+ "Sid": "Stmt1450910920641"
+ }
+ ],
+ "Version": "2012-10-17"
+ },
+ "route_table_ids": [
+ "rtb-abcd1234"
+ ],
+ "service_name": "com.amazonaws.ap-southeast-2.s3",
+ "vpc_endpoint_id": "vpce-a1b2c3d4",
+ "vpc_id": "vpc-abbad0d0"
+ }
+ ]
+'''
+
+import datetime
+import json
+import time
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def wait_for_status(client, module, resource_id, status):
+ polling_increment_secs = 15
+ max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+    status_achieved = False
+    resource = None  # guard against an unbound name if the loop body never runs
+
+ for x in range(0, max_retries):
+ try:
+ resource = get_endpoints(client, module, resource_id)['VpcEndpoints'][0]
+ if resource['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ return status_achieved, resource
+
+
+def get_endpoints(client, module, resource_id=None):
+ params = dict()
+ if resource_id:
+ params['VpcEndpointIds'] = [resource_id]
+
+ result = json.loads(json.dumps(client.describe_vpc_endpoints(**params), default=date_handler))
+ return result
+
+
+def setup_creation(client, module):
+ vpc_id = module.params.get('vpc_id')
+ service_name = module.params.get('service')
+
+ if module.params.get('route_table_ids'):
+ route_table_ids = module.params.get('route_table_ids')
+ existing_endpoints = get_endpoints(client, module)
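+        # Idempotency check: an existing endpoint in the same VPC, for the same
+        # service, with an identical set of route tables is returned unchanged.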
+ for endpoint in existing_endpoints['VpcEndpoints']:
+ if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name:
+ sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds'])
+ sorted_route_table_ids = sorted(route_table_ids)
+ if sorted_endpoint_rt_ids == sorted_route_table_ids:
+ return False, camel_dict_to_snake_dict(endpoint)
+
+ changed, result = create_vpc_endpoint(client, module)
+
+ return changed, json.loads(json.dumps(result, default=date_handler))
+
+
+def create_vpc_endpoint(client, module):
+ params = dict()
+ changed = False
+ token_provided = False
+ params['VpcId'] = module.params.get('vpc_id')
+ params['ServiceName'] = module.params.get('service')
+ params['DryRun'] = module.check_mode
+
+ if module.params.get('route_table_ids'):
+ params['RouteTableIds'] = module.params.get('route_table_ids')
+
+ if module.params.get('client_token'):
+ token_provided = True
+ request_time = datetime.datetime.utcnow()
+ params['ClientToken'] = module.params.get('client_token')
+
+ policy = None
+ if module.params.get('policy'):
+ try:
+ policy = json.loads(module.params.get('policy'))
+        except ValueError as e:
+            # json.loads() raises ValueError, which carries no boto3 response data
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ elif module.params.get('policy_file'):
+ try:
+ with open(module.params.get('policy_file'), 'r') as json_data:
+ policy = json.load(json_data)
+        except Exception as e:
+            # File access errors carry no boto3 response data
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ if policy:
+ params['PolicyDocument'] = json.dumps(policy)
+
+ try:
+ changed = True
+ result = camel_dict_to_snake_dict(client.create_vpc_endpoint(**params)['VpcEndpoint'])
+ if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)):
+ changed = False
+ elif module.params.get('wait') and not module.check_mode:
+ status_achieved, result = wait_for_status(client, module, result['vpc_endpoint_id'], 'available')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc endpoint to become available - please check the AWS console')
+ except is_boto3_error_code('DryRunOperation'):
+ changed = True
+ result = 'Would have created VPC Endpoint if not in check mode'
+ except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except
+ module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API")
+ except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except
+ module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API")
+    except Exception as e:
+        # Only botocore ClientErrors carry a response attribute
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(getattr(e, 'response', {})))
+
+ return changed, result
+
+
+def setup_removal(client, module):
+ params = dict()
+ changed = False
+ params['DryRun'] = module.check_mode
+ if isinstance(module.params.get('vpc_endpoint_id'), string_types):
+ params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')]
+ else:
+ params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id')
+ try:
+ result = client.delete_vpc_endpoints(**params)['Unsuccessful']
+ if not module.check_mode and (result != []):
+ module.fail_json(msg=result)
+ except is_boto3_error_code('DryRunOperation'):
+ changed = True
+ result = 'Would have deleted VPC Endpoint if not in check mode'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Failed to delete VPC endpoint")
+    except Exception as e:
+        # Only botocore ClientErrors carry a response attribute
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(getattr(e, 'response', {})))
+ return changed, result
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ service=dict(),
+ policy=dict(type='json'),
+ policy_file=dict(type='path', aliases=['policy_path']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=320, required=False),
+ route_table_ids=dict(type='list', elements='str'),
+ vpc_endpoint_id=dict(),
+ client_token=dict(),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['policy', 'policy_file']],
+ required_if=[
+ ['state', 'present', ['vpc_id', 'service']],
+ ['state', 'absent', ['vpc_endpoint_id']],
+ ],
+ )
+
+ # Validate Requirements
+ state = module.params.get('state')
+
+ try:
+ ec2 = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Ensure resource is present
+ if state == 'present':
+ (changed, results) = setup_creation(ec2, module)
+ else:
+ (changed, results) = setup_removal(ec2, module)
+
+ module.exit_json(changed=changed, result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_facts.py
new file mode 100644
index 00000000..e72b487d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_facts.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint_info
+short_description: Retrieves AWS VPC endpoint details using AWS methods
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC Endpoints.
+ - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ query:
+ description:
+ - Specifies the query action to take.
+ - C(services) returns the supported AWS services that can be specified when creating an endpoint.
+ required: True
+ choices:
+ - services
+ - endpoints
+ type: str
+ vpc_endpoint_ids:
+ description:
+ - Get details of specific endpoint IDs.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+ community.aws.ec2_vpc_endpoint_info:
+ query: services
+ region: ap-southeast-2
+ register: supported_endpoint_services
+
+- name: Get all endpoints in ap-southeast-2 region
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ register: existing_endpoints
+
+- name: Get all endpoints with specific filters
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ filters:
+ vpc-id:
+ - vpc-12345678
+ - vpc-87654321
+ vpc-endpoint-state:
+ - available
+ - pending
+ register: existing_endpoints
+
+- name: Get details on specific endpoint
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ vpc_endpoint_ids:
+ - vpce-12345678
+ register: endpoint_details
+'''
+
+RETURN = r'''
+service_names:
+ description: AWS VPC endpoint service names
+ returned: I(query) is C(services)
+ type: list
+ sample:
+ service_names:
+ - com.amazonaws.ap-southeast-2.s3
+vpc_endpoints:
+ description:
+ - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
+ policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
+ returned: I(query) is C(endpoints)
+ type: list
+ sample:
+ vpc_endpoints:
+ - creation_timestamp: "2017-02-16T11:06:48+00:00"
+ policy_document: >
+ "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
+ \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
+ \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
+ route_table_ids:
+ - rtb-abcd1234
+ service_name: "com.amazonaws.ap-southeast-2.s3"
+ state: "available"
+ vpc_endpoint_id: "vpce-abbad0d0"
+ vpc_id: "vpc-1111ffff"
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
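+# json.dumps() fallback: boto3 returns datetime objects that are not JSON
+# serializable, so coerce anything exposing isoformat() to an ISO 8601 string.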
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+@AWSRetry.exponential_backoff()
+def get_supported_services(client, module):
+ results = list()
+ params = dict()
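+ # describe_vpc_endpoint_services is paginated; keep following NextToken
+ # until the complete service list has been collected.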
+ while True:
+ response = client.describe_vpc_endpoint_services(**params)
+ results.extend(response['ServiceNames'])
+ if 'NextToken' in response:
+ params['NextToken'] = response['NextToken']
+ else:
+ break
+ return dict(service_names=results)
+
+
+@AWSRetry.exponential_backoff()
+def get_endpoints(client, module):
+ results = list()
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('vpc_endpoint_ids'):
+ params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
+ while True:
+ response = client.describe_vpc_endpoints(**params)
+ results.extend(response['VpcEndpoints'])
+ if 'NextToken' in response:
+ params['NextToken'] = response['NextToken']
+ else:
+ break
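+ # Round-trip through JSON so datetime values are converted to strings (via
+ # date_handler) before the results are returned.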
+ try:
+ results = json.loads(json.dumps(results, default=date_handler))
+ except Exception as e:
+ module.fail_json_aws(e, msg="Failed to get endpoints")
+ return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
+
+
+def main():
+ argument_spec = dict(
+ query=dict(choices=['services', 'endpoints'], required=True),
+ filters=dict(default={}, type='dict'),
+ vpc_endpoint_ids=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_endpoint_facts':
+ module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invocations = {
+ 'services': get_supported_services,
+ 'endpoints': get_endpoints,
+ }
+ results = invocations[module.params.get('query')](connection, module)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_info.py
new file mode 100644
index 00000000..e72b487d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_endpoint_info.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint_info
+short_description: Retrieves AWS VPC endpoint details using AWS methods.
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC Endpoints.
+ - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ query:
+ description:
+ - Specifies the query action to take.
+ - C(services) returns the supported AWS services that can be specified when creating an endpoint.
+ required: True
+ choices:
+ - services
+ - endpoints
+ type: str
+ vpc_endpoint_ids:
+ description:
+ - Get details of specific endpoint IDs.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+ community.aws.ec2_vpc_endpoint_info:
+ query: services
+ region: ap-southeast-2
+ register: supported_endpoint_services
+
+- name: Get all endpoints in ap-southeast-2 region
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ register: existing_endpoints
+
+- name: Get all endpoints with specific filters
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ filters:
+ vpc-id:
+ - vpc-12345678
+ - vpc-87654321
+ vpc-endpoint-state:
+ - available
+ - pending
+ register: existing_endpoints
+
+- name: Get details on specific endpoint
+ community.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ vpc_endpoint_ids:
+ - vpce-12345678
+ register: endpoint_details
+'''
+
+RETURN = r'''
+service_names:
+ description: AWS VPC endpoint service names
+ returned: I(query) is C(services)
+ type: list
+ sample:
+ service_names:
+ - com.amazonaws.ap-southeast-2.s3
+vpc_endpoints:
+ description:
+ - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
+ policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
+ returned: I(query) is C(endpoints)
+ type: list
+ sample:
+ vpc_endpoints:
+ - creation_timestamp: "2017-02-16T11:06:48+00:00"
+ policy_document: >
+ "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
+ \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
+ \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
+ route_table_ids:
+ - rtb-abcd1234
+ service_name: "com.amazonaws.ap-southeast-2.s3"
+ state: "available"
+ vpc_endpoint_id: "vpce-abbad0d0"
+ vpc_id: "vpc-1111ffff"
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
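+# json.dumps() fallback: boto3 returns datetime objects that are not JSON
+# serializable, so coerce anything exposing isoformat() to an ISO 8601 string.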
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+@AWSRetry.exponential_backoff()
+def get_supported_services(client, module):
+ results = list()
+ params = dict()
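+ # describe_vpc_endpoint_services is paginated; keep following NextToken
+ # until the complete service list has been collected.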
+ while True:
+ response = client.describe_vpc_endpoint_services(**params)
+ results.extend(response['ServiceNames'])
+ if 'NextToken' in response:
+ params['NextToken'] = response['NextToken']
+ else:
+ break
+ return dict(service_names=results)
+
+
+@AWSRetry.exponential_backoff()
+def get_endpoints(client, module):
+ results = list()
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('vpc_endpoint_ids'):
+ params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
+ while True:
+ response = client.describe_vpc_endpoints(**params)
+ results.extend(response['VpcEndpoints'])
+ if 'NextToken' in response:
+ params['NextToken'] = response['NextToken']
+ else:
+ break
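+ # Round-trip through JSON so datetime values are converted to strings (via
+ # date_handler) before the results are returned.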
+ try:
+ results = json.loads(json.dumps(results, default=date_handler))
+ except Exception as e:
+ module.fail_json_aws(e, msg="Failed to get endpoints")
+ return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
+
+
+def main():
+ argument_spec = dict(
+ query=dict(choices=['services', 'endpoints'], required=True),
+ filters=dict(default={}, type='dict'),
+ vpc_endpoint_ids=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_endpoint_facts':
+ module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invocations = {
+ 'services': get_supported_services,
+ 'endpoints': get_endpoints,
+ }
+ results = invocations[module.params.get('query')](connection, module)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw.py
new file mode 100644
index 00000000..3d8d9f3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+version_added: 1.0.0
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+author: Robert Estelle (@erydo)
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ type: str
+ tags:
+ description:
+ - A dict of tags to apply to the internet gateway.
+ - To remove all tags set I(tags={}) and I(purge_tags=true).
+ aliases: [ 'resource_tags' ]
+ type: dict
+ purge_tags:
+ description:
+ - Remove tags not listed in I(tags).
+ type: bool
+ default: true
+ version_added: 1.3.0
+ state:
+ description:
+ - Create or terminate the IGW
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - botocore
+ - boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+- community.aws.ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ register: igw
+
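+# A couple of further usage sketches; vpc-abcdefgh and the tag value are
+# placeholders rather than real resources.
+- name: Tag the Internet Gateway, removing any tags not listed
+ community.aws.ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ tags:
+ Name: public-gw
+ purge_tags: true
+
+- name: Remove the Internet Gateway from the VPC
+ community.aws.ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: absent
+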
+'''
+
+RETURN = '''
+changed:
+ description: If any changes have been made to the Internet Gateway.
+ type: bool
+ returned: always
+ sample:
+ changed: false
+gateway_id:
+ description: The unique identifier for the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ gateway_id: "igw-XXXXXXXX"
+tags:
+ description: The tags associated with the Internet Gateway.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The VPC ID associated with the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpc_id: "vpc-XXXXXXXX"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+class AnsibleEc2Igw(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ vpc_id = self._module.params.get('vpc_id')
+ state = self._module.params.get('state', 'present')
+ tags = self._module.params.get('tags')
+ purge_tags = self._module.params.get('purge_tags')
+
+ if state == 'present':
+ self.ensure_igw_present(vpc_id, tags, purge_tags)
+ elif state == 'absent':
+ self.ensure_igw_absent(vpc_id)
+
+ def get_matching_igw(self, vpc_id):
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ igws = []
+ try:
+ response = self._connection.describe_internet_gateways(aws_retry=True, Filters=filters)
+ igws = response.get('InternetGateways', [])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ igw = None
+ if len(igws) > 1:
+ self._module.fail_json(
+ msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id))
+ elif igws:
+ igw = camel_dict_to_snake_dict(igws[0])
+
+ return igw
+
+ def check_input_tags(self, tags):
+ if tags is None:
+ return
+ nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)]
+ if nonstring_tags:
+ self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags))
+
+ def ensure_tags(self, igw_id, tags, purge_tags):
+ final_tags = []
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
+ cur_tags = None
+ try:
+ cur_tags = self._connection.describe_tags(aws_retry=True, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ if tags is None:
+ return boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+ final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
+
+ if to_update:
+ try:
+ if self._check_mode:
+ # check mode: simulate the update by modifying the local tag copy
+ final_tags.update(to_update)
+ else:
+ self._connection.create_tags(
+ aws_retry=True,
+ Resources=[igw_id],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if self._check_mode:
+ # check mode: simulate the deletion by modifying the local tag copy
+ for key in to_delete:
+ del final_tags[key]
+ else:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ self._connection.delete_tags(aws_retry=True, Resources=[igw_id], Tags=tags_list)
+
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if not self._check_mode and (to_update or to_delete):
+ try:
+ response = self._connection.describe_tags(aws_retry=True, Filters=filters)
+ final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ return final_tags
+
+ @staticmethod
+ def get_igw_info(igw):
+ return {
+ 'gateway_id': igw['internet_gateway_id'],
+ 'tags': igw['tags'],
+ 'vpc_id': igw['vpc_id']
+ }
+
+ def ensure_igw_absent(self, vpc_id):
+ igw = self.get_matching_igw(vpc_id)
+ if igw is None:
+ return self._results
+
+ if self._check_mode:
+ self._results['changed'] = True
+ return self._results
+
+ try:
+ self._results['changed'] = True
+ self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
+ self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")
+
+ return self._results
+
+ def ensure_igw_present(self, vpc_id, tags, purge_tags):
+ self.check_input_tags(tags)
+
+ igw = self.get_matching_igw(vpc_id)
+
+ if igw is None:
+ if self._check_mode:
+ self._results['changed'] = True
+ self._results['gateway_id'] = None
+ return self._results
+
+ try:
+ response = self._connection.create_internet_gateway(aws_retry=True)
+
+ # Ensure the gateway exists before trying to attach it or add tags
+ waiter = get_waiter(self._connection, 'internet_gateway_exists')
+ waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']])
+
+ igw = camel_dict_to_snake_dict(response['InternetGateway'])
+ self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')
+
+ igw['vpc_id'] = vpc_id
+
+ igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, purge_tags=purge_tags)
+
+ igw_info = self.get_igw_info(igw)
+ self._results.update(igw_info)
+
+ return self._results
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ results = dict(
+ changed=False
+ )
+ igw_manager = AnsibleEc2Igw(module=module, results=results)
+ igw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_facts.py
new file mode 100644
index 00000000..ab7d26a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_facts.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_igw_info
+version_added: 1.0.0
+short_description: Gather information about internet gateways in AWS
+description:
+ - Gather information about internet gateways in AWS.
+ - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
+ type: dict
+ internet_gateway_ids:
+ description:
+ - Get details of specific Internet Gateway IDs. Provide this value as a list.
+ type: list
+ elements: str
+ convert_tags:
+ description:
+ - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format.
+ - This currently defaults to C(False). The default will be changed to C(True) after 2022-06-22.
+ type: bool
+ version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all Internet Gateways for an account or profile
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ register: igw_info
+
+- name: Gather information about a filtered list of Internet Gateways
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "igw-123"
+ register: igw_info
+
+- name: Gather information about a specific internet gateway by InternetGatewayId
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ internet_gateway_ids: [igw-c1231234]
+ register: igw_info
+'''
+
+RETURN = r'''
+internet_gateways:
+ description: The internet gateways for the account.
+ returned: always
+ type: list
+ sample: [
+ {
+ "attachments": [
+ {
+ "state": "available",
+ "vpc_id": "vpc-02123b67"
+ }
+ ],
+ "internet_gateway_id": "igw-2123634d",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "test-vpc-20-igw"
+ }
+ ]
+ }
+ ]
+
+changed:
+ description: True if listing the internet gateways succeeds.
+ type: bool
+ returned: always
+ sample: "false"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_internet_gateway_info(internet_gateway, convert_tags):
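+ # When convert_tags is set, flatten the boto3 tag list into a plain dict and
+ # use ignore_list so camel_dict_to_snake_dict leaves the tag keys untouched.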
+ if convert_tags:
+ tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags'])
+ ignore_list = ["Tags"]
+ else:
+ tags = internet_gateway['Tags']
+ ignore_list = []
+ internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
+ 'Attachments': internet_gateway['Attachments'],
+ 'Tags': tags}
+
+ internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list)
+ return internet_gateway_info
+
+
+def list_internet_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ convert_tags = module.params.get('convert_tags')
+
+ if module.params.get("internet_gateway_ids"):
+ params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
+
+ try:
+ all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params)
+ except is_boto3_error_code('InvalidInternetGatewayID.NotFound'):
+ module.fail_json(msg='InternetGateway not found')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, 'Unable to describe internet gateways')
+
+ return [get_internet_gateway_info(igw, convert_tags)
+ for igw in all_internet_gateways['InternetGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ internet_gateway_ids=dict(type='list', default=None, elements='str'),
+ convert_tags=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_igw_facts':
+ module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws')
+
+ if module.params.get('convert_tags') is None:
+ module.deprecate('This module currently returns boto3 style tags by default. '
+ 'This default has been deprecated and the module will return a simple dictionary in future. '
+ 'This behaviour can be controlled through the convert_tags parameter.',
+ date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Gather and return the requested internet gateway information
+ results = list_internet_gateways(connection, module)
+
+ module.exit_json(internet_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py
new file mode 100644
index 00000000..ab7d26a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_igw_info
+version_added: 1.0.0
+short_description: Gather information about internet gateways in AWS
+description:
+ - Gather information about internet gateways in AWS.
+ - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
+ type: dict
+ internet_gateway_ids:
+ description:
+ - Get details of specific Internet Gateway IDs. Provide this value as a list.
+ type: list
+ elements: str
+ convert_tags:
+ description:
+ - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format.
+ - This currently defaults to C(False). The default will be changed to C(True) after 2022-06-22.
+ type: bool
+ version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all Internet Gateways for an account or profile
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ register: igw_info
+
+- name: Gather information about a filtered list of Internet Gateways
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "igw-123"
+ register: igw_info
+
+- name: Gather information about a specific internet gateway by InternetGatewayId
+ community.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ internet_gateway_ids: [igw-c1231234]
+ register: igw_info
+'''
+
+RETURN = r'''
+internet_gateways:
+ description: The internet gateways for the account.
+ returned: always
+ type: list
+ sample: [
+ {
+ "attachments": [
+ {
+ "state": "available",
+ "vpc_id": "vpc-02123b67"
+ }
+ ],
+ "internet_gateway_id": "igw-2123634d",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "test-vpc-20-igw"
+ }
+ ]
+ }
+ ]
+
+changed:
+ description: True if listing the internet gateways succeeds.
+ type: bool
+ returned: always
+ sample: "false"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_internet_gateway_info(internet_gateway, convert_tags):
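+ # When convert_tags is set, flatten the boto3 tag list into a plain dict and
+ # use ignore_list so camel_dict_to_snake_dict leaves the tag keys untouched.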
+ if convert_tags:
+ tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags'])
+ ignore_list = ["Tags"]
+ else:
+ tags = internet_gateway['Tags']
+ ignore_list = []
+ internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
+ 'Attachments': internet_gateway['Attachments'],
+ 'Tags': tags}
+
+ internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list)
+ return internet_gateway_info
+
+
+def list_internet_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ convert_tags = module.params.get('convert_tags')
+
+ if module.params.get("internet_gateway_ids"):
+ params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
+
+ try:
+ all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params)
+ except is_boto3_error_code('InvalidInternetGatewayID.NotFound'):
+ module.fail_json(msg='InternetGateway not found')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, 'Unable to describe internet gateways')
+
+ return [get_internet_gateway_info(igw, convert_tags)
+ for igw in all_internet_gateways['InternetGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ internet_gateway_ids=dict(type='list', default=None, elements='str'),
+ convert_tags=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_igw_facts':
+ module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws')
+
+ if module.params.get('convert_tags') is None:
+ module.deprecate('This module currently returns boto3 style tags by default. '
+ 'This default has been deprecated and the module will return a simple dictionary in future. '
+ 'This behaviour can be controlled through the convert_tags parameter.',
+ date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Gather and return the requested internet gateway information
+ results = list_internet_gateways(connection, module)
+
+ module.exit_json(internet_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
new file mode 100644
index 00000000..387ceb48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nacl
+short_description: Create and delete Network ACLs.
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for Network ACLs
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+options:
+ name:
+ description:
+ - Tagged name identifying a network ACL.
+ - One and only one of the I(name) or I(nacl_id) is required.
+ required: false
+ type: str
+ nacl_id:
+ description:
+ - NACL ID identifying a network ACL.
+ - One and only one of the I(name) or I(nacl_id) is required.
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - VPC ID of the requesting VPC.
+ - Required when I(state=present).
+ required: false
+ type: str
+ subnets:
+ description:
+ - The list of subnets that should be associated with the network ACL.
+ - Must be specified as a list.
+ - Each subnet can be specified as a subnet ID, or its tagged name.
+ required: false
+ type: list
+ elements: str
+ egress:
+ description:
+ - A list of rules for outgoing traffic. Each rule must be specified as a list.
+ Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
+ the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
+ the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+ TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+ See examples.
+ default: []
+ required: false
+ type: list
+ elements: list
+ ingress:
+ description:
+ - List of rules for incoming traffic. Each rule must be specified as a list.
+ Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
+ the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
+ the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+ TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+ See examples.
+ default: []
+ required: false
+ type: list
+ elements: list
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a network ACL.
+ required: false
+ type: dict
+ state:
+ description:
+ - Creates or modifies an existing NACL.
+ - Deletes a NACL and reassociates subnets to the default NACL.
+ required: false
+ type: str
+ choices: ['present', 'absent']
+ default: present
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ botocore, boto3 ]
+'''
+
+EXAMPLES = r'''
+
+# Complete example to create and delete a network ACL
+# that allows SSH, HTTP and ICMP in, and all traffic out.
+- name: "Create and associate production DMZ network ACL with DMZ subnets"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets: ['prod-dmz-1', 'prod-dmz-2']
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ ingress:
+ # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+ # port from, port to
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ state: 'present'
+
+- name: "Remove the ingress and egress rules - defaults to deny all"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets:
+ - prod-dmz-1
+ - prod-dmz-2
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ state: present
+
+- name: "Remove the NACL subnet associations and tags"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: 'vpc-12345678'
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ state: present
+
+- name: "Delete nacl and subnet associations"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ state: absent
+
+- name: "Delete nacl by its id"
+ community.aws.ec2_vpc_nacl:
+ nacl_id: acl-33b4ee5b
+ state: absent
+'''
+RETURN = r'''
+task:
+ description: The result of the create, or delete action.
+ returned: success
+ type: dict
+nacl_id:
+ description: The ID of the NACL (when creating or updating an ACL).
+ returned: success
+ type: str
+ sample: acl-123456789abcdef01
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NUMBERS = {'all': -1, '-1': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
+
+
+# Utility methods
+def icmp_present(entry):
+ # A six-element rule is the ICMP form:
+ # [rule no, protocol, allow/deny, cidr, icmp type, icmp code]
+ return len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1)
+
+
+def load_tags(module):
+ tags = []
+ if module.params.get('tags'):
+ for name, value in module.params.get('tags').items():
+ tags.append({'Key': name, 'Value': str(value)})
+ tags.append({'Key': "Name", 'Value': module.params.get('name')})
+ else:
+ tags.append({'Key': "Name", 'Value': module.params.get('name')})
+ return tags
+
+
+def subnets_removed(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnet_ids if subnet not in subnets]
+
+
+def subnets_added(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnets if subnet not in subnet_ids]
+
+
+def subnets_changed(nacl, client, module):
+ changed = False
+ vpc_id = module.params.get('vpc_id')
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ subnets = subnets_to_associate(nacl, client, module)
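+ # No subnets requested: move any subnets currently associated with this
+ # NACL back to the VPC's default NACL.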
+ if not subnets:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
+ if subnets:
+ replace_network_acl_association(default_nacl_id, subnets, client, module)
+ changed = True
+ return changed
+ return changed
+ subs_added = subnets_added(nacl_id, subnets, client, module)
+ if subs_added:
+ replace_network_acl_association(nacl_id, subs_added, client, module)
+ changed = True
+ subs_removed = subnets_removed(nacl_id, subnets, client, module)
+ if subs_removed:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ replace_network_acl_association(default_nacl_id, subs_removed, client, module)
+ changed = True
+ return changed
+
+
+def nacls_changed(nacl, client, module):
+ changed = False
+ params = dict()
+ params['egress'] = module.params.get('egress')
+ params['ingress'] = module.params.get('ingress')
+
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ nacl = describe_network_acl(client, module)
+ entries = nacl['NetworkAcls'][0]['Entries']
+ egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
+ ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
+ if rules_changed(egress, params['egress'], True, nacl_id, client, module):
+ changed = True
+ if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
+ changed = True
+ return changed
+
+
+def tags_changed(nacl_id, client, module):
+ changed = False
+ tags = dict()
+ if module.params.get('tags'):
+ tags = module.params.get('tags')
+ if module.params.get('name') and not tags.get('Name'):
+ tags['Name'] = module.params['name']
+ nacl = find_acl_by_id(nacl_id, client, module)
+ if nacl['NetworkAcls']:
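+ # Flatten both tag sets into [key, value, ...] lists so they can be
+ # compared order-insensitively.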
+ nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
+ nacl_tags = [item for sublist in nacl_values for item in sublist]
+ tag_values = [[key, str(value)] for key, value in tags.items()]
+ tags = [item for sublist in tag_values for item in sublist]
+ if sorted(nacl_tags) == sorted(tags):
+ changed = False
+ return changed
+ else:
+ delete_tags(nacl_id, client, module)
+ create_tags(nacl_id, client, module)
+ changed = True
+ return changed
+ return changed
+
+
+def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
+ changed = False
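+ # Normalise the play's rule lists into boto3-style entry dicts so they can
+ # be diffed directly against what the API returned.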
+ rules = list()
+ for entry in param_rules:
+ rules.append(process_rule_entry(entry, Egress))
+ if rules == aws_rules:
+ return changed
+ else:
+ removed_rules = [x for x in aws_rules if x not in rules]
+ if removed_rules:
+ params = dict()
+ for rule in removed_rules:
+ params['NetworkAclId'] = nacl_id
+ params['RuleNumber'] = rule['RuleNumber']
+ params['Egress'] = Egress
+ delete_network_acl_entry(params, client, module)
+ changed = True
+ added_rules = [x for x in rules if x not in aws_rules]
+ if added_rules:
+ for rule in added_rules:
+ rule['NetworkAclId'] = nacl_id
+ create_network_acl_entry(rule, client, module)
+ changed = True
+ return changed
+
+
+def process_rule_entry(entry, Egress):
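+ # entry layout: [rule no, protocol, allow/deny, cidr, icmp type, icmp code, port from, port to]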
+ params = dict()
+ params['RuleNumber'] = entry[0]
+ params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
+ params['RuleAction'] = entry[2]
+ params['Egress'] = Egress
+ params['CidrBlock'] = entry[3]
+ if icmp_present(entry):
+ params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
+ else:
+ if entry[6] or entry[7]:
+ params['PortRange'] = {"From": entry[6], 'To': entry[7]}
+ return params
+
+
+def restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ if assoc_ids:
+ params = dict()
+ params['NetworkAclId'] = default_nacl_id[0]
+ for assoc_id in assoc_ids:
+ params['AssociationId'] = assoc_id
+ restore_default_acl_association(params, client, module)
+ return True
+
+
+def construct_acl_entries(nacl, client, module):
+ for entry in module.params.get('ingress'):
+ params = process_rule_entry(entry, Egress=False)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+ for rule in module.params.get('egress'):
+ params = process_rule_entry(rule, Egress=True)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+
+
+# Module invocations
+def setup_network_acl(client, module):
+ changed = False
+ nacl = describe_network_acl(client, module)
+ if not nacl['NetworkAcls']:
+ nacl = create_network_acl(module.params.get('vpc_id'), client, module)
+ nacl_id = nacl['NetworkAcl']['NetworkAclId']
+ create_tags(nacl_id, client, module)
+ subnets = subnets_to_associate(nacl, client, module)
+ replace_network_acl_association(nacl_id, subnets, client, module)
+ construct_acl_entries(nacl, client, module)
+ changed = True
+ return (changed, nacl['NetworkAcl']['NetworkAclId'])
+ else:
+ changed = False
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ changed |= subnets_changed(nacl, client, module)
+ changed |= nacls_changed(nacl, client, module)
+ changed |= tags_changed(nacl_id, client, module)
+ return (changed, nacl_id)
+
+
+def remove_network_acl(client, module):
+ changed = False
+ result = dict()
+ nacl = describe_network_acl(client, module)
+ if nacl['NetworkAcls']:
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ vpc_id = nacl['NetworkAcls'][0]['VpcId']
+ associations = nacl['NetworkAcls'][0]['Associations']
+ assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
+ if not default_nacl_id:
+ result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
+ return changed, result
+ if restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ if not assoc_ids:
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ return changed, result
+
+
+# Boto3 client methods
+@AWSRetry.jittered_backoff()
+def _create_network_acl(client, *args, **kwargs):
+ return client.create_network_acl(*args, **kwargs)
+
+
+def create_network_acl(vpc_id, client, module):
+ try:
+ if module.check_mode:
+ nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
+ else:
+ nacl = _create_network_acl(client, VpcId=vpc_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return nacl
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _create_network_acl_entry(client, *args, **kwargs):
+ return client.create_network_acl_entry(*args, **kwargs)
+
+
+def create_network_acl_entry(params, client, module):
+ try:
+ if not module.check_mode:
+ _create_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _create_tags(client, *args, **kwargs):
+ return client.create_tags(*args, **kwargs)
+
+
+def create_tags(nacl_id, client, module):
+ try:
+ delete_tags(nacl_id, client, module)
+ if not module.check_mode:
+ _create_tags(client, Resources=[nacl_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _delete_network_acl(client, *args, **kwargs):
+ return client.delete_network_acl(*args, **kwargs)
+
+
+def delete_network_acl(nacl_id, client, module):
+ try:
+ if not module.check_mode:
+ _delete_network_acl(client, NetworkAclId=nacl_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _delete_network_acl_entry(client, *args, **kwargs):
+ return client.delete_network_acl_entry(*args, **kwargs)
+
+
+def delete_network_acl_entry(params, client, module):
+ try:
+ if not module.check_mode:
+ _delete_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _delete_tags(client, *args, **kwargs):
+ return client.delete_tags(*args, **kwargs)
+
+
+def delete_tags(nacl_id, client, module):
+ try:
+ if not module.check_mode:
+ _delete_tags(client, Resources=[nacl_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _describe_network_acls(client, **kwargs):
+ return client.describe_network_acls(**kwargs)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _describe_network_acls_retry_missing(client, **kwargs):
+ return client.describe_network_acls(**kwargs)
+
+
+def describe_acl_associations(subnets, client, module):
+ if not subnets:
+ return []
+ try:
+ results = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'association.subnet-id', 'Values': subnets}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ associations = results['NetworkAcls'][0]['Associations']
+ return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
+
+
+def describe_network_acl(client, module):
+ try:
+ if module.params.get('nacl_id'):
+ nacl = _describe_network_acls(client, Filters=[
+ {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
+ ])
+ else:
+ nacl = _describe_network_acls(client, Filters=[
+ {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return nacl
+
+
+def find_acl_by_id(nacl_id, client, module):
+ try:
+ return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def find_default_vpc_nacl(vpc_id, client, module):
+ try:
+ response = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'vpc-id', 'Values': [vpc_id]}])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ nacls = response['NetworkAcls']
+ return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
+
+
+def find_subnet_ids_by_nacl_id(nacl_id, client, module):
+ try:
+ results = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ if results['NetworkAcls']:
+ associations = results['NetworkAcls'][0]['Associations']
+ return [s['SubnetId'] for s in associations if s['SubnetId']]
+ else:
+ return []
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _replace_network_acl_association(client, *args, **kwargs):
+ return client.replace_network_acl_association(*args, **kwargs)
+
+
+def replace_network_acl_association(nacl_id, subnets, client, module):
+ params = dict()
+ params['NetworkAclId'] = nacl_id
+ for association in describe_acl_associations(subnets, client, module):
+ params['AssociationId'] = association
+ try:
+ if not module.check_mode:
+ _replace_network_acl_association(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _replace_network_acl_entry(client, *args, **kwargs):
+ return client.replace_network_acl_entry(*args, **kwargs)
+
+
+def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
+ for entry in entries:
+ params = entry
+ params['NetworkAclId'] = nacl_id
+ try:
+ if not module.check_mode:
+ _replace_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def restore_default_acl_association(params, client, module):
+ try:
+ if not module.check_mode:
+ _replace_network_acl_association(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _describe_subnets(client, *args, **kwargs):
+ return client.describe_subnets(*args, **kwargs)
+
+
+def subnets_to_associate(nacl, client, module):
+ params = list(module.params.get('subnets'))
+ if not params:
+ return []
+ all_found = []
+ if any(x.startswith("subnet-") for x in params):
+ try:
+ subnets = _describe_subnets(client, Filters=[
+ {'Name': 'subnet-id', 'Values': params}])
+ all_found.extend(subnets.get('Subnets', []))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ if len(params) != len(all_found):
+ try:
+ subnets = _describe_subnets(client, Filters=[
+ {'Name': 'tag:Name', 'Values': params}])
+ all_found.extend(subnets.get('Subnets', []))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId')))
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ name=dict(),
+ nacl_id=dict(),
+ subnets=dict(required=False, type='list', default=list(), elements='str'),
+ tags=dict(required=False, type='dict'),
+ ingress=dict(required=False, type='list', default=list(), elements='list'),
+ egress=dict(required=False, type='list', default=list(), elements='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'nacl_id']],
+ required_if=[['state', 'present', ['vpc_id']]])
+
+ state = module.params.get('state').lower()
+
+ client = module.client('ec2')
+
+ invocations = {
+ "present": setup_network_acl,
+ "absent": remove_network_acl
+ }
+ (changed, results) = invocations[state](client, module)
+ module.exit_json(changed=changed, nacl_id=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_facts.py
new file mode 100644
index 00000000..aabe489c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_facts.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_nacl_info
+version_added: 1.0.0
+short_description: Gather information about Network ACLs in an AWS VPC
+description:
+ - Gather information about Network ACLs in an AWS VPC.
+ - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Davidson (@brandond)"
+requirements: [ boto3 ]
+options:
+ nacl_ids:
+ description:
+ - A list of Network ACL IDs to retrieve information about.
+ required: false
+ default: []
+ aliases: [nacl_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all Network ACLs.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all Network ACLs:
+- name: Get All NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ register: all_nacls
+
+# Retrieve default Network ACLs:
+- name: Get Default NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ filters:
+ 'default': 'true'
+ register: default_nacls
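+
+# Retrieve specific NACLs by ID (a sketch; the ID below is a placeholder):
+- name: Get NACLs by ID
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ nacl_ids:
+ - acl-33b4ee5b
+ register: nacls_by_id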
+'''
+
+RETURN = r'''
+nacls:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ nacl_id:
+ description: The ID of the Network Access Control List.
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC that the NACL is attached to.
+ returned: always
+ type: str
+ is_default:
+ description: True if the NACL is the default for its VPC.
+ returned: always
+ type: bool
+ tags:
+ description: A dict of tags associated with the NACL.
+ returned: always
+ type: dict
+ subnets:
+ description: A list of subnet IDs that are associated with the NACL.
+ returned: always
+ type: list
+ elements: str
+ ingress:
+ description:
+ - A list of NACL ingress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]]
+ egress:
+ description:
+ - A list of NACL egress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
+ ansible_dict_to_boto3_filter_list,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+
+
+def list_ec2_vpc_nacls(connection, module):
+
+ nacl_ids = module.params.get("nacl_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ if nacl_ids is None:
+ nacl_ids = []
+
+ try:
+ nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound':
+ module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
+ module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_nacls = []
+ for nacl in nacls['NetworkAcls']:
+ snaked_nacls.append(camel_dict_to_snake_dict(nacl))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for nacl in snaked_nacls:
+ if 'tags' in nacl:
+ nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
+ if 'entries' in nacl:
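+            # Rule number 32767 is the immutable default catch-all entry that
+            # AWS adds to every NACL, so it is excluded from the returned rules.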
+ nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and entry['egress']]
+ nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and not entry['egress']]
+ del nacl['entries']
+ if 'associations' in nacl:
+ nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
+ del nacl['associations']
+ if 'network_acl_id' in nacl:
+ nacl['nacl_id'] = nacl['network_acl_id']
+ del nacl['network_acl_id']
+
+ module.exit_json(nacls=snaked_nacls)
+
+
+def nacl_entry_to_list(entry):
+
+ # entry list format
+ # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
+ elist = []
+
+ elist.append(entry['rule_number'])
+
+ if entry.get('protocol') in PROTOCOL_NAMES:
+ elist.append(PROTOCOL_NAMES[entry['protocol']])
+ else:
+ elist.append(entry.get('protocol'))
+
+ elist.append(entry['rule_action'])
+
+ if entry.get('cidr_block'):
+ elist.append(entry['cidr_block'])
+ elif entry.get('ipv6_cidr_block'):
+ elist.append(entry['ipv6_cidr_block'])
+ else:
+ elist.append(None)
+
+ elist = elist + [None, None, None, None]
+
+ if entry['protocol'] in ('1', '58'):
+ elist[4] = entry.get('icmp_type_code', {}).get('type')
+ elist[5] = entry.get('icmp_type_code', {}).get('code')
+
+ if entry['protocol'] not in ('1', '6', '17', '58'):
+ elist[6] = 0
+ elist[7] = 65535
+ elif 'port_range' in entry:
+ elist[6] = entry['port_range']['from']
+ elist[7] = entry['port_range']['to']
+
+ return elist
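+
+# Illustrative flattening (hypothetical entry): an entry of
+#   {'rule_number': 100, 'protocol': '6', 'rule_action': 'allow',
+#    'cidr_block': '0.0.0.0/0', 'egress': False, 'port_range': {'from': 22, 'to': 22}}
+# becomes [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22], matching the
+# sample shown in RETURN above.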
+
+
+def main():
+
+ argument_spec = dict(
+ nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'),
+ filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_nacl_facts':
+ module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_vpc_nacls(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
new file mode 100644
index 00000000..aabe489c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_nacl_info
+version_added: 1.0.0
+short_description: Gather information about Network ACLs in an AWS VPC
+description:
+ - Gather information about Network ACLs in an AWS VPC
+ - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Davidson (@brandond)"
+requirements: [ boto3 ]
+options:
+ nacl_ids:
+ description:
+ - A list of Network ACL IDs to retrieve information about.
+ required: false
+ default: []
+ aliases: [nacl_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all Network ACLs.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all Network ACLs:
+- name: Get All NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ register: all_nacls
+
+# Retrieve default Network ACLs:
+- name: Get Default NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ filters:
+ 'default': 'true'
+ register: default_nacls
+'''
+
+RETURN = r'''
+nacls:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ nacl_id:
+ description: The ID of the Network Access Control List.
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC that the NACL is attached to.
+ returned: always
+ type: str
+ is_default:
+ description: True if the NACL is the default for its VPC.
+ returned: always
+ type: bool
+ tags:
+ description: A dict of tags associated with the NACL.
+ returned: always
+ type: dict
+ subnets:
+ description: A list of subnet IDs that are associated with the NACL.
+ returned: always
+ type: list
+ elements: str
+ ingress:
+ description:
+ - A list of NACL ingress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]]
+ egress:
+ description:
+ - A list of NACL egress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
+ ansible_dict_to_boto3_filter_list,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+
+
+def list_ec2_vpc_nacls(connection, module):
+
+ nacl_ids = module.params.get("nacl_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ if nacl_ids is None:
+ nacl_ids = []
+
+ try:
+ nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound':
+ module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
+ module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_nacls = []
+ for nacl in nacls['NetworkAcls']:
+ snaked_nacls.append(camel_dict_to_snake_dict(nacl))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for nacl in snaked_nacls:
+ if 'tags' in nacl:
+ nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
+ if 'entries' in nacl:
+ nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and entry['egress']]
+ nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and not entry['egress']]
+ del nacl['entries']
+ if 'associations' in nacl:
+ nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
+ del nacl['associations']
+ if 'network_acl_id' in nacl:
+ nacl['nacl_id'] = nacl['network_acl_id']
+ del nacl['network_acl_id']
+
+ module.exit_json(nacls=snaked_nacls)
+
+
+def nacl_entry_to_list(entry):
+
+ # entry list format
+ # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
+ elist = []
+
+ elist.append(entry['rule_number'])
+
+ if entry.get('protocol') in PROTOCOL_NAMES:
+ elist.append(PROTOCOL_NAMES[entry['protocol']])
+ else:
+ elist.append(entry.get('protocol'))
+
+ elist.append(entry['rule_action'])
+
+ if entry.get('cidr_block'):
+ elist.append(entry['cidr_block'])
+ elif entry.get('ipv6_cidr_block'):
+ elist.append(entry['ipv6_cidr_block'])
+ else:
+ elist.append(None)
+
+ elist = elist + [None, None, None, None]
+
+ if entry['protocol'] in ('1', '58'):
+ elist[4] = entry.get('icmp_type_code', {}).get('type')
+ elist[5] = entry.get('icmp_type_code', {}).get('code')
+
+ if entry['protocol'] not in ('1', '6', '17', '58'):
+ elist[6] = 0
+ elist[7] = 65535
+ elif 'port_range' in entry:
+ elist[6] = entry['port_range']['from']
+ elist[7] = entry['port_range']['to']
+
+ return elist
+
+
+def main():
+
+ argument_spec = dict(
+ nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'),
+ filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_nacl_facts':
+ module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_vpc_nacls(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py
new file mode 100644
index 00000000..9072a8e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py
@@ -0,0 +1,1004 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_nat_gateway
+version_added: 1.0.0
+short_description: Manage AWS VPC NAT Gateways.
+description:
+ - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
+requirements: [boto3, botocore]
+options:
+ state:
+ description:
+ - Ensure NAT Gateway is present or absent.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ nat_gateway_id:
+ description:
+      - The id AWS dynamically allocates to the NAT Gateway on creation.
+        This is required when I(state=absent).
+ type: str
+ subnet_id:
+ description:
+      - The id of the subnet to create the NAT Gateway in. This is required
+        when I(state=present).
+ type: str
+ allocation_id:
+ description:
+      - The id of the elastic IP allocation. If neither this nor the
+        eip_address parameter is passed, an EIP is generated for this NAT Gateway.
+ type: str
+ eip_address:
+ description:
+ - The elastic IP address of the EIP you want attached to this NAT Gateway.
+ If this is not passed and the allocation_id is not passed,
+ an EIP is generated for this NAT Gateway.
+ type: str
+ if_exist_do_not_create:
+ description:
+      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
+ required: false
+ default: false
+ type: bool
+ release_eip:
+ description:
+ - Deallocate the EIP from the VPC.
+ - Option is only valid with the absent state.
+      - You should use this with the wait option, since you cannot release an address while a delete operation is happening.
+ default: false
+ type: bool
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ default: 320
+ type: int
+ client_token:
+ description:
+ - Optional unique token to be used during create to ensure idempotency.
+ When specifying this option, ensure you specify the eip_address parameter
+        as well, otherwise any subsequent runs will fail.
+ type: str
+author:
+ - Allen Sanabria (@linuxdynasty)
+ - Jon Hadfield (@jonhadfield)
+ - Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+ community.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ region: ap-southeast-2
+ client_token: abcd-12345678
+ register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id.
+ community.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+ community.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ wait: true
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.
+ community.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: true
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
+ community.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: true
+ region: ap-southeast-2
+ if_exist_do_not_create: true
+ register: new_nat_gateway
+
+- name: Delete nat gateway using discovered nat gateways from facts module.
+ community.aws.ec2_vpc_nat_gateway:
+ state: absent
+ region: ap-southeast-2
+ wait: true
+ nat_gateway_id: "{{ item.NatGatewayId }}"
+ release_eip: true
+ register: delete_nat_gateway_result
+ loop: "{{ gateways_to_remove.result }}"
+
+- name: Delete nat gateway and wait for deleted status.
+ community.aws.ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ wait: true
+ wait_timeout: 500
+ region: ap-southeast-2
+
+- name: Delete nat gateway and release EIP.
+ community.aws.ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ release_eip: true
+    wait: true
+ wait_timeout: 300
+ region: ap-southeast-2
+'''
+
+RETURN = '''
+create_time:
+  description: The time the NAT Gateway was created, in ISO 8601 format (UTC).
+ returned: In all cases.
+ type: str
+ sample: "2016-03-05T05:19:20.282000+00:00'"
+nat_gateway_id:
+ description: id of the VPC NAT Gateway
+ returned: In all cases.
+ type: str
+ sample: "nat-0d1e3a878585988f8"
+subnet_id:
+ description: id of the Subnet
+ returned: In all cases.
+ type: str
+ sample: "subnet-12345"
+state:
+ description: The current state of the NAT Gateway.
+ returned: In all cases.
+ type: str
+ sample: "available"
+vpc_id:
+ description: id of the VPC.
+ returned: In all cases.
+ type: str
+ sample: "vpc-12345"
+nat_gateway_addresses:
+ description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
+ returned: In all cases.
+  type: list
+ sample: [
+ {
+ 'public_ip': '52.52.52.52',
+ 'network_interface_id': 'eni-12345',
+ 'private_ip': '10.0.0.100',
+ 'allocation_id': 'eipalloc-12345'
+ }
+ ]
+'''
+
+import datetime
+import random
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+DRY_RUN_GATEWAYS = [
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "available",
+ "create_time": "2016-03-05T05:19:20.282000+00:00",
+ "vpc_id": "vpc-12345678"
+ }
+]
+
+DRY_RUN_ALLOCATION_UNCONVERTED = {
+ 'Addresses': [
+ {
+ 'PublicIp': '55.55.55.55',
+ 'Domain': 'vpc',
+ 'AllocationId': 'eipalloc-1234567'
+ }
+ ]
+}
+
+DRY_RUN_MSGS = 'DryRun Mode:'
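+
+# These DRY_RUN_* fixtures stand in for real AWS responses when the helper
+# functions below are called with check_mode=True, so check mode never has to
+# reach the EC2 API.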
+
+
+def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
+ states=None, check_mode=False):
+ """Retrieve a list of NAT Gateways
+ Args:
+ client (botocore.client.EC2): Boto3 client
+
+ Kwargs:
+ subnet_id (str): The subnet_id the nat resides in.
+ nat_gateway_id (str): The Amazon nat id.
+ states (list): States available (pending, failed, available, deleting, and deleted)
+ default=None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-12345678'
+ >>> get_nat_gateways(client, subnet_id)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-12345678"
+        }
+    ]
+
+ Returns:
+ Tuple (bool, str, list)
+ """
+ params = dict()
+ err_msg = ""
+ gateways_retrieved = False
+ existing_gateways = list()
+ if not states:
+ states = ['available', 'pending']
+ if nat_gateway_id:
+ params['NatGatewayIds'] = [nat_gateway_id]
+ else:
+ params['Filter'] = [
+ {
+ 'Name': 'subnet-id',
+ 'Values': [subnet_id]
+ },
+ {
+ 'Name': 'state',
+ 'Values': states
+ }
+ ]
+
+ try:
+ if not check_mode:
+ gateways = client.describe_nat_gateways(**params)['NatGateways']
+ if gateways:
+ for gw in gateways:
+ existing_gateways.append(camel_dict_to_snake_dict(gw))
+ gateways_retrieved = True
+ else:
+ gateways_retrieved = True
+ if nat_gateway_id:
+ if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
+ existing_gateways = DRY_RUN_GATEWAYS
+ elif subnet_id:
+ if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
+ existing_gateways = DRY_RUN_GATEWAYS
+ err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return gateways_retrieved, err_msg, existing_gateways
+
+
+def wait_for_status(client, wait_timeout, nat_gateway_id, status,
+ check_mode=False):
+ """Wait for the NAT Gateway to reach a status
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ nat_gateway_id (str): The Amazon nat id.
+ status (str): The status to wait for.
+ examples. status=available, status=deleted
+
+ Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> nat_gateway_id = 'nat-123456789'
+        >>> wait_for_status(client, 500, nat_gateway_id, 'deleted')
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-1234567",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-12345678"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-12345677"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ polling_increment_secs = 5
+ wait_timeout = time.time() + wait_timeout
+ status_achieved = False
+ nat_gateway = dict()
+ states = ['pending', 'failed', 'available', 'deleting', 'deleted']
+ err_msg = ""
+
+ while wait_timeout > time.time():
+ try:
+ gws_retrieved, err_msg, nat_gateways = (
+ get_nat_gateways(
+ client, nat_gateway_id=nat_gateway_id,
+ states=states, check_mode=check_mode
+ )
+ )
+ if gws_retrieved and nat_gateways:
+ nat_gateway = nat_gateways[0]
+ if check_mode:
+ nat_gateway['state'] = status
+
+ if nat_gateway.get('state') == status:
+ status_achieved = True
+ break
+
+ elif nat_gateway.get('state') == 'failed':
+ err_msg = nat_gateway.get('failure_message')
+ break
+
+ elif nat_gateway.get('state') == 'pending':
+ if 'failure_message' in nat_gateway:
+ err_msg = nat_gateway.get('failure_message')
+ status_achieved = False
+ break
+
+ else:
+ time.sleep(polling_increment_secs)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+    if not status_achieved and not err_msg:
+        err_msg = "Wait timeout reached while waiting for results"
+
+ return status_achieved, err_msg, nat_gateway
+
+
+def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
+ check_mode=False):
+ """Retrieve all NAT Gateways for a subnet.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        subnet_id (str): The subnet_id the nat resides in.
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
+ (
+ [
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-1234567"
+ }
+ ],
+ False
+ )
+
+ Returns:
+ Tuple (list, bool)
+ """
+ allocation_id_exists = False
+ gateways = []
+ states = ['available', 'pending']
+ gws_retrieved, err_msg, gws = (
+ get_nat_gateways(
+ client, subnet_id, states=states, check_mode=check_mode
+ )
+ )
+ if not gws_retrieved:
+ return gateways, allocation_id_exists
+ for gw in gws:
+ for address in gw['nat_gateway_addresses']:
+ if allocation_id:
+ if address.get('allocation_id') == allocation_id:
+ allocation_id_exists = True
+ gateways.append(gw)
+ else:
+ gateways.append(gw)
+
+ return gateways, allocation_id_exists
+
+
+def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ eip_address (str): The Elastic IP Address of the EIP.
+
+ Kwargs:
+        check_mode (bool): if set to true, do not call AWS and
+            return simulated results instead.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> eip_address = '52.87.29.36'
+ >>> get_eip_allocation_id_by_address(client, eip_address)
+ 'eipalloc-36014da3'
+
+ Returns:
+ Tuple (str, str)
+ """
+ params = {
+ 'PublicIps': [eip_address],
+ }
+ allocation_id = None
+ err_msg = ""
+ try:
+ if not check_mode:
+ allocations = client.describe_addresses(**params)['Addresses']
+ if len(allocations) == 1:
+ allocation = allocations[0]
+ else:
+ allocation = None
+ else:
+ dry_run_eip = (
+ DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
+ )
+ if dry_run_eip == eip_address:
+ allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
+ else:
+ allocation = None
+ if allocation:
+ if allocation.get('Domain') != 'vpc':
+ err_msg = (
+ "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
+ .format(eip_address)
+ )
+ else:
+ allocation_id = allocation.get('AllocationId')
+ else:
+ err_msg = (
+ "EIP {0} does not exist".format(eip_address)
+ )
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return allocation_id, err_msg
+
+
+def allocate_eip_address(client, check_mode=False):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+
+ Kwargs:
+        check_mode (bool): if set to true, do not call AWS and
+            return simulated results instead.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+        >>> allocate_eip_address(client)
+        (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')
+
+    Returns:
+        Tuple (bool, str, str)
+ """
+ ip_allocated = False
+ new_eip = None
+ err_msg = ''
+ params = {
+ 'Domain': 'vpc',
+ }
+ try:
+ if check_mode:
+ ip_allocated = True
+ random_numbers = (
+ ''.join(str(x) for x in random.sample(range(0, 9), 7))
+ )
+ new_eip = 'eipalloc-{0}'.format(random_numbers)
+ else:
+ new_eip = client.allocate_address(**params)['AllocationId']
+ ip_allocated = True
+ err_msg = 'eipalloc id {0} created'.format(new_eip)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return ip_allocated, err_msg, new_eip
+
+
+def release_address(client, allocation_id, check_mode=False):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ allocation_id (str): The eip Amazon identifier.
+
+ Kwargs:
+        check_mode (bool): if set to true, do not call AWS and
+            return simulated results instead.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> allocation_id = "eipalloc-123456"
+ >>> release_address(client, allocation_id)
+ True
+
+ Returns:
+        Tuple (bool, str)
+ """
+ err_msg = ''
+ if check_mode:
+ return True, ''
+
+ ip_released = False
+ try:
+ client.describe_addresses(AllocationIds=[allocation_id])
+ except botocore.exceptions.ClientError as e:
+ # IP address likely already released
+ # Happens with gateway in 'deleted' state that
+ # still lists associations
+ return True, str(e)
+ try:
+ client.release_address(AllocationId=allocation_id)
+ ip_released = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+ return ip_released, err_msg
+
+
+def create(client, subnet_id, allocation_id, client_token=None,
+ wait=False, wait_timeout=0, if_exist_do_not_create=False,
+ check_mode=False):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ subnet_id (str): The subnet_id the nat resides in.
+ allocation_id (str): The eip Amazon identifier.
+
+ Kwargs:
+        if_exist_do_not_create (bool): if a nat gateway already exists in this
+            subnet, then do not create another one.
+            default = False
+        wait (bool): Wait for the nat to be in the available state before returning.
+ default = False
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ default = 0
+ client_token (str):
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-1234567",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-1234567"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, list)
+ """
+ params = {
+ 'SubnetId': subnet_id,
+ 'AllocationId': allocation_id
+ }
+ request_time = datetime.datetime.utcnow()
+ changed = False
+ success = False
+ token_provided = False
+ err_msg = ""
+
+ if client_token:
+ token_provided = True
+ params['ClientToken'] = client_token
+
+ try:
+ if not check_mode:
+ result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"])
+ else:
+ result = DRY_RUN_GATEWAYS[0]
+ result['create_time'] = datetime.datetime.utcnow()
+ result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id
+ result['subnet_id'] = subnet_id
+
+ success = True
+ changed = True
+ create_time = result['create_time'].replace(tzinfo=None)
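+        # When a client token was provided and the reported create_time predates
+        # this request, AWS returned an existing (idempotent) gateway instead of
+        # creating a new one, so no change is reported.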
+ if token_provided and (request_time > create_time):
+ changed = False
+ elif wait:
+ success, err_msg, result = (
+ wait_for_status(
+ client, wait_timeout, result['nat_gateway_id'], 'available',
+ check_mode=check_mode
+ )
+ )
+ if success:
+ err_msg = (
+ 'NAT gateway {0} created'.format(result['nat_gateway_id'])
+ )
+
+ except is_boto3_error_code('IdempotentParameterMismatch'):
+ err_msg = (
+ 'NAT Gateway does not support update and token has already been provided: ' + err_msg
+ )
+ success = False
+ changed = False
+ result = None
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ err_msg = to_native(e)
+ success = False
+ changed = False
+ result = None
+
+ return success, changed, err_msg, result
+
+
+def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
+ if_exist_do_not_create=False, wait=False, wait_timeout=0,
+ client_token=None, check_mode=False):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ subnet_id (str): The subnet_id the nat resides in.
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+ eip_address (str): The Elastic IP Address of the EIP.
+ default = None
+        if_exist_do_not_create (bool): if a nat gateway already exists in this
+            subnet, then do not create another one.
+            default = False
+        wait (bool): Wait for the nat to be in the available state before returning.
+ default = False
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ default = 0
+ client_token (str):
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> subnet_id = 'subnet-w4t12897'
+ >>> allocation_id = 'eipalloc-36014da3'
+ >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "subnet_id": "subnet-w4t12897",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, bool, str, list)
+ """
+ success = False
+ changed = False
+ err_msg = ""
+ results = list()
+
+ if not allocation_id and not eip_address:
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
+ )
+
+ if len(existing_gateways) > 0 and if_exist_do_not_create:
+ success = True
+ changed = False
+ results = existing_gateways[0]
+ err_msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return success, changed, err_msg, results
+ else:
+ success, err_msg, allocation_id = (
+ allocate_eip_address(client, check_mode=check_mode)
+ )
+ if not success:
+            return success, False, err_msg, dict()
+
+ elif eip_address or allocation_id:
+ if eip_address and not allocation_id:
+ allocation_id, err_msg = (
+ get_eip_allocation_id_by_address(
+ client, eip_address, check_mode=check_mode
+ )
+ )
+ if not allocation_id:
+ success = False
+ changed = False
+ return success, changed, err_msg, dict()
+
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(
+ client, subnet_id, allocation_id, check_mode=check_mode
+ )
+ )
+ if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
+ success = True
+ changed = False
+ results = existing_gateways[0]
+ err_msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return success, changed, err_msg, results
+
+ success, changed, err_msg, results = create(
+ client, subnet_id, allocation_id, client_token,
+ wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
+ )
+
+ return success, changed, err_msg, results
+
+
+def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
+ release_eip=False, check_mode=False):
+ """Delete an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ nat_gateway_id (str): The Amazon nat id.
+
+ Kwargs:
+ wait (bool): Wait for the nat to be in the deleted state before returning.
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> nat_gw_id = 'nat-03835afb6e31df79b'
+ >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
+ [
+ true,
+ "",
+ {
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "subnet_id": "subnet-w4t12897",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "state": "deleted",
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, list)
+ """
+ params = {
+ 'NatGatewayId': nat_gateway_id
+ }
+ success = False
+ changed = False
+ err_msg = ""
+    results = list()
+    allocation_id = None
+    states = ['pending', 'available']
+ try:
+ exist, err_msg, gw = (
+ get_nat_gateways(
+ client, nat_gateway_id=nat_gateway_id,
+ states=states, check_mode=check_mode
+ )
+ )
+ if exist and len(gw) == 1:
+ results = gw[0]
+ if not check_mode:
+ client.delete_nat_gateway(**params)
+
+ allocation_id = (
+ results['nat_gateway_addresses'][0]['allocation_id']
+ )
+ changed = True
+ success = True
+ err_msg = (
+ 'NAT gateway {0} is in a deleting state. Delete was successful'
+ .format(nat_gateway_id)
+ )
+
+ if wait:
+ status_achieved, err_msg, results = (
+ wait_for_status(
+ client, wait_timeout, nat_gateway_id, 'deleted',
+ check_mode=check_mode
+ )
+ )
+ if status_achieved:
+ err_msg = (
+ 'NAT gateway {0} was deleted successfully'
+ .format(nat_gateway_id)
+ )
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = str(e)
+
+    # Only attempt to release the EIP if the gateway (and its allocation) was found above.
+    if release_eip and allocation_id:
+ eip_released, eip_err = (
+ release_address(client, allocation_id, check_mode)
+ )
+ if not eip_released:
+ err_msg = (
+ "{0}: Failed to release EIP {1}: {2}"
+ .format(err_msg, allocation_id, eip_err)
+ )
+ success = False
+
+ return success, changed, err_msg, results
+
+
+def main():
+ argument_spec = dict(
+ subnet_id=dict(type='str'),
+ eip_address=dict(type='str'),
+ allocation_id=dict(type='str'),
+ if_exist_do_not_create=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=320, required=False),
+ release_eip=dict(type='bool', default=False),
+ nat_gateway_id=dict(type='str'),
+ client_token=dict(type='str'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['allocation_id', 'eip_address']
+ ],
+ required_if=[['state', 'absent', ['nat_gateway_id']],
+ ['state', 'present', ['subnet_id']]],
+ )
+
+ state = module.params.get('state').lower()
+ check_mode = module.check_mode
+ subnet_id = module.params.get('subnet_id')
+ allocation_id = module.params.get('allocation_id')
+ eip_address = module.params.get('eip_address')
+ nat_gateway_id = module.params.get('nat_gateway_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ release_eip = module.params.get('release_eip')
+ client_token = module.params.get('client_token')
+ if_exist_do_not_create = module.params.get('if_exist_do_not_create')
+
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ changed = False
+ err_msg = ''
+
+ if state == 'present':
+ success, changed, err_msg, results = (
+ pre_create(
+ client, subnet_id, allocation_id, eip_address,
+ if_exist_do_not_create, wait, wait_timeout,
+ client_token, check_mode=check_mode
+ )
+ )
+ else:
+ success, changed, err_msg, results = (
+ remove(
+ client, nat_gateway_id, wait, wait_timeout, release_eip,
+ check_mode=check_mode
+ )
+ )
+
+ if not success:
+ module.fail_json(
+ msg=err_msg, success=success, changed=changed
+ )
+ else:
+ module.exit_json(
+ msg=err_msg, success=success, changed=changed, **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_facts.py
new file mode 100644
index 00000000..97816c72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_facts.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nat_gateway_info
+short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods.
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC Managed Nat Gateways
+ - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ nat_gateway_ids:
+ description:
+ - List of specific nat gateway IDs to fetch details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all nat gateways
+- name: List all managed nat gateways in ap-southeast-2
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ register: all_ngws
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_ngws.result }}"
+
+- name: Get details on specific nat gateways
+ community.aws.ec2_vpc_nat_gateway_info:
+ nat_gateway_ids:
+ - nat-1234567891234567
+ - nat-7654321987654321
+ region: ap-southeast-2
+ register: specific_ngws
+
+- name: Get all nat gateways with specific filters
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ state: ['pending']
+ register: pending_ngws
+
+- name: Get nat gateways with specific filter
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ subnet-id: subnet-12345678
+ state: ['available']
+ register: existing_nat_gateways
+'''
+
+RETURN = r'''
+result:
+ description: The result of the describe, converted to ansible snake case style.
+ See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
+ returned: success
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
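+# boto3 returns datetime objects that the stdlib json encoder cannot serialize;
+# round-tripping through json.dumps(..., default=date_handler) converts them to
+# ISO 8601 strings before the snake_case conversion below.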
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_nat_gateways(client, module, nat_gateway_id=None):
+ params = dict()
+ nat_gateways = list()
+
+ params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+
+ try:
+ result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ for gateway in result['NatGateways']:
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ converted_gateway = camel_dict_to_snake_dict(gateway)
+ if 'tags' in converted_gateway:
+ # Turn the boto3 result into ansible friendly tag dictionary
+ converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+
+ nat_gateways.append(converted_gateway)
+
+ return nat_gateways
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict'),
+ nat_gateway_ids=dict(default=[], type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+ if module._name == 'ec2_vpc_nat_gateway_facts':
+ module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = get_nat_gateways(connection, module)
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
new file mode 100644
index 00000000..97816c72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nat_gateway_info
+short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods.
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC Managed Nat Gateways
+ - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ nat_gateway_ids:
+ description:
+ - List of specific nat gateway IDs to fetch details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all nat gateways
+- name: List all managed nat gateways in ap-southeast-2
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ register: all_ngws
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_ngws.result }}"
+
+- name: Get details on specific nat gateways
+ community.aws.ec2_vpc_nat_gateway_info:
+ nat_gateway_ids:
+ - nat-1234567891234567
+ - nat-7654321987654321
+ region: ap-southeast-2
+ register: specific_ngws
+
+- name: Get all nat gateways with specific filters
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ state: ['pending']
+ register: pending_ngws
+
+- name: Get nat gateways with specific filter
+ community.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ subnet-id: subnet-12345678
+ state: ['available']
+ register: existing_nat_gateways
+'''
+
+RETURN = r'''
+result:
+ description: The result of the describe, converted to ansible snake case style.
+ See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
+ returned: success
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_nat_gateways(client, module, nat_gateway_id=None):
+ params = dict()
+ nat_gateways = list()
+
+ params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+
+ try:
+ result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ for gateway in result['NatGateways']:
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ converted_gateway = camel_dict_to_snake_dict(gateway)
+ if 'tags' in converted_gateway:
+ # Turn the boto3 result into ansible friendly tag dictionary
+ converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+
+ nat_gateways.append(converted_gateway)
+
+ return nat_gateways
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict'),
+ nat_gateway_ids=dict(default=[], type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+ if module._name == 'ec2_vpc_nat_gateway_facts':
+ module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = get_nat_gateways(connection, module)
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
new file mode 100644
index 00000000..31f6ea20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
@@ -0,0 +1,435 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_peer
+short_description: Create, delete, accept, and reject VPC peering connections between two VPCs.
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for VPC Peering Connections
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html).
+options:
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+ required: false
+ type: str
+ peering_id:
+ description:
+ - Peering connection id.
+ required: false
+ type: str
+ peer_region:
+ description:
+ - Region of the accepting VPC.
+ required: false
+ type: str
+ peer_vpc_id:
+ description:
+ - VPC id of the accepting VPC.
+ required: false
+ type: str
+ peer_owner_id:
+ description:
+ - The AWS account number for cross account peering.
+ required: false
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a Peering Connection.
+ required: false
+ type: dict
+ state:
+ description:
+      - Create, delete, accept, or reject a peering connection.
+ required: false
+ default: present
+ choices: ['present', 'absent', 'accept', 'reject']
+ type: str
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+# Complete example to create and accept a local peering connection.
+- name: Create local account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept local VPC peering request
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: action_peer
+
+# Complete example to delete a local peering connection.
+- name: Create local account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: delete a local VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: absent
+ register: vpc_peer
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from remote account
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and accept an intra-region peering connection.
+- name: Create intra-region VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: us-east-1
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ peer_region: us-west-2
+ state: present
+ tags:
+ Name: Peering connection for us-east-1 VPC to us-west-2 VPC
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from peer region
+ community.aws.ec2_vpc_peer:
+ region: us-west-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and reject a local peering connection.
+- name: Create local account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a local VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: reject
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept a cross account VPC peering connection request
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+
+# Complete example to create and reject a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a cross account VPC peering Connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: reject
+
+'''
+RETURN = '''
+task:
+ description: The result of the create, accept, reject or delete action.
+ returned: success
+ type: dict
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+import distutils.version
+import traceback
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def tags_changed(pcx_id, client, module):
+ changed = False
+ tags = dict()
+ if module.params.get('tags'):
+ tags = module.params.get('tags')
+ pcx = find_pcx_by_id(pcx_id, client, module)
+ if pcx['VpcPeeringConnections']:
+ pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']]
+ pcx_tags = [item for sublist in pcx_values for item in sublist]
+ tag_values = [[key, str(value)] for key, value in tags.items()]
+ tags = [item for sublist in tag_values for item in sublist]
+ if sorted(pcx_tags) == sorted(tags):
+ changed = False
+ elif tags:
+ delete_tags(pcx_id, client, module)
+ create_tags(pcx_id, client, module)
+ changed = True
+ return changed
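+
+# Illustrative comparison (hypothetical values): module tags of
+# {'Name': 'x', 'CostCode': 'y'} flatten to ['Name', 'x', 'CostCode', 'y'] and
+# are compared, after sorting, against the flattened AWS tag list, so key
+# ordering alone never triggers a tag rewrite.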
+
+
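+# Look up an existing peering between the two VPCs: first treat vpc_id as the
+# requester and, if nothing matches, retry with the requester/accepter roles
+# swapped, since either side may have initiated the connection.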
+def describe_peering_connections(params, client):
+ result = client.describe_vpc_peering_connections(
+ Filters=[
+ {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]},
+ {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}
+ ]
+ )
+ if result['VpcPeeringConnections'] == []:
+ result = client.describe_vpc_peering_connections(
+ Filters=[
+ {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]},
+ {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]}
+ ]
+ )
+ return result
+
+
+def is_active(peering_conn):
+ return peering_conn['Status']['Code'] == 'active'
+
+
+def is_pending(peering_conn):
+ return peering_conn['Status']['Code'] == 'pending-acceptance'
+
+
+def create_peer_connection(client, module):
+ changed = False
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+ params['PeerVpcId'] = module.params.get('peer_vpc_id')
+ if module.params.get('peer_region'):
+ if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.8.6'):
+ module.fail_json(msg="specifying peer_region parameter requires botocore >= 1.8.6")
+ params['PeerRegion'] = module.params.get('peer_region')
+ if module.params.get('peer_owner_id'):
+ params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ peering_conns = describe_peering_connections(params, client)
+ for peering_conn in peering_conns['VpcPeeringConnections']:
+ pcx_id = peering_conn['VpcPeeringConnectionId']
+ if tags_changed(pcx_id, client, module):
+ changed = True
+ if is_active(peering_conn):
+ return (changed, peering_conn['VpcPeeringConnectionId'])
+ if is_pending(peering_conn):
+ return (changed, peering_conn['VpcPeeringConnectionId'])
+ try:
+ peering_conn = client.create_vpc_peering_connection(**params)
+ pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
+ if module.params.get('tags'):
+ create_tags(pcx_id, client, module)
+ changed = True
+ return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_peer_connection(client, module):
+ pcx_id = module.params.get('peering_id')
+ if not pcx_id:
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+ params['PeerVpcId'] = module.params.get('peer_vpc_id')
+ params['PeerRegion'] = module.params.get('peer_region')
+ if module.params.get('peer_owner_id'):
+ params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ peering_conns = describe_peering_connections(params, client)
+ if not peering_conns['VpcPeeringConnections']:
+ module.exit_json(changed=False)
+ else:
+ pcx_id = peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId']
+
+ try:
+ params = dict()
+ params['VpcPeeringConnectionId'] = pcx_id
+ client.delete_vpc_peering_connection(**params)
+ module.exit_json(changed=True)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def peer_status(client, module):
+ params = dict()
+ params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')]
+ try:
+ vpc_peering_connection = client.describe_vpc_peering_connections(**params)
+ return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code']
+ except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: # pylint: disable=duplicate-except
+ module.fail_json(msg='Malformed connection ID: {0}'.format(e), traceback=traceback.format_exc())
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e), traceback=traceback.format_exc())
+
+
+def accept_reject(state, client, module):
+ changed = False
+ params = dict()
+ params['VpcPeeringConnectionId'] = module.params.get('peering_id')
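+ # Only call the API when the connection is not already active; repeated
+ # accept/reject runs therefore stay idempotent and only tag drift is applied.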
+ if peer_status(client, module) != 'active':
+ try:
+ if state == 'accept':
+ client.accept_vpc_peering_connection(**params)
+ else:
+ client.reject_vpc_peering_connection(**params)
+ if module.params.get('tags'):
+ create_tags(params['VpcPeeringConnectionId'], client, module)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ if tags_changed(params['VpcPeeringConnectionId'], client, module):
+ changed = True
+ return changed, params['VpcPeeringConnectionId']
+
+
+def load_tags(module):
+ tags = []
+ if module.params.get('tags'):
+ for name, value in module.params.get('tags').items():
+ tags.append({'Key': name, 'Value': str(value)})
+ return tags
+
+
+def create_tags(pcx_id, client, module):
+ try:
+ delete_tags(pcx_id, client, module)
+ client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_tags(pcx_id, client, module):
+ try:
+ client.delete_tags(Resources=[pcx_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def find_pcx_by_id(pcx_id, client, module):
+ try:
+ return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ peer_vpc_id=dict(),
+ peer_region=dict(),
+ peering_id=dict(),
+ peer_owner_id=dict(),
+ tags=dict(required=False, type='dict'),
+ state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
+ )
+ required_if = [
+ ('state', 'present', ['vpc_id', 'peer_vpc_id']),
+ ('state', 'accept', ['peering_id']),
+ ('state', 'reject', ['peering_id'])
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)
+
+ state = module.params.get('state')
+ peering_id = module.params.get('peering_id')
+ vpc_id = module.params.get('vpc_id')
+ peer_vpc_id = module.params.get('peer_vpc_id')
+
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ (changed, results) = create_peer_connection(client, module)
+ module.exit_json(changed=changed, peering_id=results)
+ elif state == 'absent':
+ if not peering_id and (not vpc_id or not peer_vpc_id):
+ module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
+
+ remove_peer_connection(client, module)
+ else:
+ (changed, results) = accept_reject(state, client, module)
+ module.exit_json(changed=changed, peering_id=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_facts.py
new file mode 100644
index 00000000..117992e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_facts.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_peering_info
+short_description: Retrieves AWS VPC Peering details using AWS methods
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC peers.
+ - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ peer_connection_ids:
+ description:
+ - List of specific VPC peer IDs to get details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all VPC Peers
+- name: List all vpc peers
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_vpc_peers.result }}"
+
+- name: Get details on specific VPC peer
+ community.aws.ec2_vpc_peering_info:
+ peer_connection_ids:
+ - pcx-12345678
+ - pcx-87654321
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Get all vpc peers with specific filters
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ filters:
+ status-code: ['pending-acceptance']
+ register: pending_vpc_peers
+'''
+
+RETURN = r'''
+result:
+ description: The result of the describe.
+ returned: success
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_vpc_peers(client, module):
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('peer_connection_ids'):
+ params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
+ try:
+ result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ return result['VpcPeeringConnections']
+
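The JSON round-trip in get_vpc_peers() above is a compact way to normalise datetimes: dumping with a default handler and loading again turns every datetime anywhere in the nested boto3 response into an ISO-8601 string. A standalone sketch (response contents hypothetical):

import json
from datetime import datetime

def date_handler(obj):
    return obj.isoformat() if hasattr(obj, 'isoformat') else obj

response = {'ExpirationTime': datetime(2021, 1, 1, 12, 0), 'Status': {'Code': 'pending-acceptance'}}
normalised = json.loads(json.dumps(response, default=date_handler))
print(normalised['ExpirationTime'])  # 2021-01-01T12:00:00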
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=dict(), type='dict'),
+ peer_connection_ids=dict(default=None, type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+ if module._name == 'ec2_vpc_peering_facts':
+ module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ ec2 = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Turn the boto3 result in to ansible friendly_snaked_names
+ results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for peer in results:
+ peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', []))
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
new file mode 100644
index 00000000..117992e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_peering_info
+short_description: Retrieves AWS VPC Peering details using AWS methods
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC peers.
+ - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ peer_connection_ids:
+ description:
+ - List of specific VPC peer IDs to get details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all VPC Peers
+- name: List all vpc peers
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_vpc_peers.result }}"
+
+- name: Get details on specific VPC peer
+ community.aws.ec2_vpc_peering_info:
+ peer_connection_ids:
+ - pcx-12345678
+ - pcx-87654321
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Get all vpc peers with specific filters
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ filters:
+ status-code: ['pending-acceptance']
+ register: pending_vpc_peers
+'''
+
+RETURN = r'''
+result:
+ description: The result of the describe.
+ returned: success
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_vpc_peers(client, module):
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('peer_connection_ids'):
+ params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
+ try:
+ result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ return result['VpcPeeringConnections']
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=dict(), type='dict'),
+ peer_connection_ids=dict(default=None, type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+ if module._name == 'ec2_vpc_peering_facts':
+ module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ ec2 = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Turn the boto3 result in to ansible friendly_snaked_names
+ results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for peer in results:
+ peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', []))
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table.py
new file mode 100644
index 00000000..ca5d586b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table.py
@@ -0,0 +1,744 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_route_table
+version_added: 1.0.0
+short_description: Manage route tables for AWS virtual private clouds
+description:
+ - Manage route tables for AWS virtual private clouds
+author:
+- Robert Estelle (@erydo)
+- Rob White (@wimnat)
+- Will Thames (@willthames)
+options:
+ lookup:
+ description: Look up route table by either tags or route table ID. Non-unique tag lookup will fail.
+ If no tags are specified then no lookup for an existing route table is performed and a new
+ route table will be created. To change tags of a route table you must look up by id.
+ default: tag
+ choices: [ 'tag', 'id' ]
+ type: str
+ propagating_vgw_ids:
+ description: Enable route propagation from virtual gateways specified by ID.
+ type: list
+ elements: str
+ purge_routes:
+ description: Purge existing routes that are not found in routes.
+ type: bool
+ default: 'yes'
+ purge_subnets:
+ description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied.
+ default: 'yes'
+ type: bool
+ purge_tags:
+ description: Purge existing tags that are not found in route table.
+ type: bool
+ default: 'no'
+ route_table_id:
+ description:
+ - The ID of the route table to update or delete.
+ - Required when I(lookup=id).
+ type: str
+ routes:
+ description: List of routes in the route table.
+ Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
+ 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'.
+ If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'.
+ Routes are required for present states.
+ type: list
+ elements: dict
+ state:
+ description: Create or destroy the VPC route table.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnets:
+ description: An array of subnets to add to this route table. Subnets may be specified
+ by subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'.
+ type: list
+ elements: str
+ tags:
+ description: >
+ A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are
+ used to uniquely identify route tables within a VPC when the route_table_id is not supplied.
+ aliases: [ "resource_tags" ]
+ type: dict
+ vpc_id:
+ description:
+ - VPC ID of the VPC in which to create the route table.
+ - Required when I(state=present) or I(lookup=tag).
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ community.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Set up NAT-protected route table
+ community.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+- name: delete route table
+ community.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ route_table_id: "{{ route_table.id }}"
+ lookup: id
+ state: absent
+'''
+
+RETURN = r'''
+route_table:
+ description: Route Table result
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of subnets associated with the route table
+ returned: always
+ type: complex
+ contains:
+ main:
+ description: Whether this is the main route table
+ returned: always
+ type: bool
+ sample: false
+ route_table_association_id:
+ description: ID of association between route table and subnet
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet
+ returned: always
+ type: str
+ sample: subnet-82055af9
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility)
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: CIDR block of destination
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ gateway_id:
+ description: ID of the gateway
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description: ID of a NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description: AWS account owning the NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: 123456789012
+ nat_gateway_id:
+ description: ID of the NAT gateway
+ returned: when the route is via a NAT gateway
+ type: str
+ sample: nat-0123456789abcdef0
+ origin:
+ description: mechanism through which the route is in the table
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+ description: state of the route
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
+'''
+
+import re
+from time import sleep
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, AWSRetry
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
+# AWS resource IDs are lowercase; note that [A-z] would also match the ASCII
+# punctuation between 'Z' and 'a', so an explicit class is used instead.
+SUBNET_RE = re.compile(r'^subnet-[a-z0-9]+$')
+ROUTE_TABLE_RE = re.compile(r'^rtb-[a-z0-9]+$')
+
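A quick sanity check of what the patterns above match; anything that is neither a subnet ID nor a CIDR falls through to a Name-tag lookup in find_subnets() below (IDs hypothetical):

import re

CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
SUBNET_RE = re.compile(r'^subnet-[a-z0-9]+$')

assert CIDR_RE.match('10.0.0.0/24')
assert SUBNET_RE.match('subnet-82055af9')
assert not SUBNET_RE.match('Database Subnet')  # treated as a Name tag instead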
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(connection, **params):
+ return connection.describe_subnets(**params)['Subnets']
+
+
+def find_subnets(connection, module, vpc_id, identified_subnets):
+ """
+ Finds a list of subnets, each identified either by a raw ID, a unique
+ 'Name' tag, or a CIDR such as 10.0.0.0/8.
+
+ Note that this function is duplicated in other ec2 modules, and should
+ potentially be moved into a shared module_utils
+ """
+ subnet_ids = []
+ subnet_names = []
+ subnet_cidrs = []
+ for subnet in (identified_subnets or []):
+ if re.match(SUBNET_RE, subnet):
+ subnet_ids.append(subnet)
+ elif re.match(CIDR_RE, subnet):
+ subnet_cidrs.append(subnet)
+ else:
+ subnet_names.append(subnet)
+
+ subnets_by_id = []
+ if subnet_ids:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)
+
+ subnets_by_cidr = []
+ if subnet_cidrs:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
+ try:
+ subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)
+
+ subnets_by_name = []
+ if subnet_names:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
+ try:
+ subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)
+
+ for name in subnet_names:
+ matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
+ if matching_count == 0:
+ module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
+ elif matching_count > 1:
+ module.fail_json(msg='Multiple subnets named "{0}"'.format(name))
+
+ return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
+def find_igw(connection, module, vpc_id):
+ """
+ Finds the Internet gateway for the given VPC ID.
+ """
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ try:
+ igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id))
+ if len(igw) == 1:
+ return igw[0]['InternetGatewayId']
+ elif len(igw) == 0:
+ module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
+ else:
+ module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))
+
+
+@AWSRetry.exponential_backoff()
+def describe_tags_with_backoff(connection, resource_id):
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id})
+ paginator = connection.get_paginator('describe_tags')
+ tags = paginator.paginate(Filters=filters).build_full_result()['Tags']
+ return boto3_tag_list_to_ansible_dict(tags)
+
+
+def tags_match(match_tags, candidate_tags):
+ return all((k in candidate_tags and candidate_tags[k] == v
+ for k, v in match_tags.items()))
+
+
+def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None):
+ try:
+ cur_tags = describe_tags_with_backoff(connection, resource_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list tags for VPC')
+
+ to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags)
+
+ if not to_add and not to_delete:
+ return {'changed': False, 'tags': cur_tags}
+ if check_mode:
+ if not purge_tags:
+ # dict.update() returns None, so merge into a copy rather than
+ # assigning its return value back to tags.
+ merged = dict(cur_tags)
+ merged.update(tags)
+ tags = merged
+ return {'changed': True, 'tags': tags}
+
+ if to_delete:
+ try:
+ connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+ if to_add:
+ try:
+ connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ try:
+ latest_tags = describe_tags_with_backoff(connection, resource_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list tags for VPC')
+ return {'changed': True, 'tags': latest_tags}
+
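For readers unfamiliar with compare_aws_tags() from the amazon.aws module_utils: it diffs the current tags against the desired ones and returns what to add and what to delete. An approximate pure-Python re-statement of its semantics (a sketch, not the real helper):

def compare_tags(cur, new, purge):
    to_add = {k: v for k, v in new.items() if cur.get(k) != v}
    to_delete = [k for k in cur if k not in new] if purge else []
    return to_add, to_delete

print(compare_tags({'Name': 'Public', 'Env': 'dev'}, {'Name': 'Public', 'Owner': 'ops'}, purge=True))
# -> ({'Owner': 'ops'}, ['Env'])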
+
+@AWSRetry.exponential_backoff()
+def describe_route_tables_with_backoff(connection, **params):
+ try:
+ return connection.describe_route_tables(**params)['RouteTables']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':
+ return None
+ else:
+ raise
+
+
+def get_route_table_by_id(connection, module, route_table_id):
+
+ route_table = None
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ if route_tables:
+ route_table = route_tables[0]
+
+ return route_table
+
+
+def get_route_table_by_tags(connection, module, vpc_id, tags):
+ count = 0
+ route_table = None
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ for table in route_tables:
+ this_tags = describe_tags_with_backoff(connection, table['RouteTableId'])
+ if tags_match(tags, this_tags):
+ route_table = table
+ count += 1
+
+ if count > 1:
+ module.fail_json(msg="Tags provided do not identify a unique route table")
+ else:
+ return route_table
+
+
+def route_spec_matches_route(route_spec, route):
+ if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']:
+ route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
+ if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']:
+ if route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
+ route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')
+
+ return set(route_spec.items()).issubset(route.items())
+
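The subset test above treats a route spec as matching when every key/value it specifies also appears in the live route; extra keys on the live route (Origin, State, ...) are ignored. A standalone illustration (IDs hypothetical):

spec = {'DestinationCidrBlock': '0.0.0.0/0', 'GatewayId': 'igw-0abc1234'}
live = {'DestinationCidrBlock': '0.0.0.0/0', 'GatewayId': 'igw-0abc1234',
        'Origin': 'CreateRoute', 'State': 'active'}
assert set(spec.items()).issubset(set(live.items()))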
+
+def route_spec_matches_route_cidr(route_spec, route):
+ return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock')
+
+
+def rename_key(d, old_key, new_key):
+ d[new_key] = d.pop(old_key)
+
+
+def index_of_matching_route(route_spec, routes_to_match):
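+ # Implicitly returns None when nothing matches; the caller treats None as
+ # "create this route".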
+ for i, route in enumerate(routes_to_match):
+ if route_spec_matches_route(route_spec, route):
+ return "exact", i
+ elif 'Origin' in route_spec and route_spec['Origin'] != 'EnableVgwRoutePropagation':
+ if route_spec_matches_route_cidr(route_spec, route):
+ return "replace", i
+
+
+def ensure_routes(connection=None, module=None, route_table=None, route_specs=None,
+ propagating_vgw_ids=None, check_mode=None, purge_routes=None):
+ routes_to_match = [route for route in route_table['Routes']]
+ route_specs_to_create = []
+ route_specs_to_recreate = []
+ for route_spec in route_specs:
+ match = index_of_matching_route(route_spec, routes_to_match)
+ if match is None:
+ if route_spec.get('DestinationCidrBlock'):
+ route_specs_to_create.append(route_spec)
+ else:
+ module.warn("Skipping creating {0} because it has no destination cidr block. "
+ "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
+ else:
+ if match[0] == "replace":
+ if route_spec.get('DestinationCidrBlock'):
+ route_specs_to_recreate.append(route_spec)
+ else:
+ module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
+ del routes_to_match[match[1]]
+
+ routes_to_delete = []
+ if purge_routes:
+ for r in routes_to_match:
+ if not r.get('DestinationCidrBlock'):
+ module.warn("Skipping purging route {0} because it has no destination cidr block. "
+ "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r))
+ continue
+ if r['Origin'] == 'CreateRoute':
+ routes_to_delete.append(r)
+
+ changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
+ if changed and not check_mode:
+ for route in routes_to_delete:
+ try:
+ connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete route")
+
+ for route_spec in route_specs_to_recreate:
+ try:
+ connection.replace_route(RouteTableId=route_table['RouteTableId'],
+ **route_spec)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't recreate route")
+
+ for route_spec in route_specs_to_create:
+ try:
+ connection.create_route(RouteTableId=route_table['RouteTableId'],
+ **route_spec)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create route")
+
+ return {'changed': bool(changed)}
+
+
+def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None,
+ check_mode=None):
+ filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route tables")
+ for route_table in route_tables:
+ if route_table['RouteTableId'] is None:
+ continue
+ for a in route_table['Associations']:
+ if a['Main']:
+ continue
+ if a['SubnetId'] == subnet_id:
+ if route_table['RouteTableId'] == route_table_id:
+ return {'changed': False, 'association_id': a['RouteTableAssociationId']}
+ else:
+ if check_mode:
+ return {'changed': True}
+ try:
+ connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ try:
+ association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id)['AssociationId']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate subnet with route table")
+ return {'changed': True, 'association_id': association_id}
+
+
+def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None,
+ check_mode=None, purge_subnets=None):
+ current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']]
+ new_association_ids = []
+ changed = False
+ for subnet in subnets:
+ result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'],
+ route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode)
+ changed = changed or result['changed']
+ if changed and check_mode:
+ return {'changed': True}
+ new_association_ids.append(result['association_id'])
+
+ if purge_subnets:
+ to_delete = [a_id for a_id in current_association_ids
+ if a_id not in new_association_ids]
+
+ for a_id in to_delete:
+ changed = True
+ if not check_mode:
+ try:
+ connection.disassociate_route_table(AssociationId=a_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ return {'changed': changed}
+
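Purging in ensure_subnet_associations() above reduces to a difference between the association IDs that existed before and those confirmed or created on this run. A minimal sketch (IDs hypothetical):

current = ['rtbassoc-aaa111', 'rtbassoc-bbb222', 'rtbassoc-ccc333']  # pre-existing, non-Main
kept = ['rtbassoc-aaa111', 'rtbassoc-ccc333']                        # confirmed this run
print([a for a in current if a not in kept])  # ['rtbassoc-bbb222']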
+
+def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None,
+ check_mode=None):
+ changed = False
+ gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']]
+ to_add = set(propagating_vgw_ids) - set(gateways)
+ if to_add:
+ changed = True
+ if not check_mode:
+ for vgw_id in to_add:
+ try:
+ connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'],
+ GatewayId=vgw_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't enable route propagation")
+
+ return {'changed': changed}
+
+
+def ensure_route_table_absent(connection, module):
+
+ lookup = module.params.get('lookup')
+ route_table_id = module.params.get('route_table_id')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ purge_subnets = module.params.get('purge_subnets')
+
+ if lookup == 'tag':
+ if tags is not None:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ else:
+ route_table = None
+ elif lookup == 'id':
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+
+ if route_table is None:
+ return {'changed': False}
+
+ # disassociate subnets before deleting route table
+ if not module.check_mode:
+ ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=[], check_mode=False, purge_subnets=purge_subnets)
+ try:
+ connection.delete_route_table(RouteTableId=route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error deleting route table")
+
+ return {'changed': True}
+
+
+def get_route_table_info(connection, module, route_table):
+ result = get_route_table_by_id(connection, module, route_table['RouteTableId'])
+ try:
+ result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for route table")
+ result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+ # backwards compatibility
+ result['id'] = result['route_table_id']
+ return result
+
+
+def create_route_spec(connection, module, vpc_id):
+ routes = module.params.get('routes')
+
+ for route_spec in routes:
+ rename_key(route_spec, 'dest', 'destination_cidr_block')
+
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
+ igw = find_igw(connection, module, vpc_id)
+ route_spec['gateway_id'] = igw
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
+ rename_key(route_spec, 'gateway_id', 'nat_gateway_id')
+
+ return snake_dict_to_camel_dict(routes, capitalize_first=True)
+
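create_route_spec() above relies on snake_dict_to_camel_dict(capitalize_first=True) to turn playbook-style keys into the boto3 call shape. A simplified stand-in showing the transformation (a sketch, not the real helper):

def snake_to_camel(d):
    return {''.join(part.title() for part in k.split('_')): v for k, v in d.items()}

print(snake_to_camel({'destination_cidr_block': '0.0.0.0/0', 'nat_gateway_id': 'nat-0abc1234'}))
# -> {'DestinationCidrBlock': '0.0.0.0/0', 'NatGatewayId': 'nat-0abc1234'}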
+
+def ensure_route_table_present(connection, module):
+
+ lookup = module.params.get('lookup')
+ propagating_vgw_ids = module.params.get('propagating_vgw_ids')
+ purge_routes = module.params.get('purge_routes')
+ purge_subnets = module.params.get('purge_subnets')
+ purge_tags = module.params.get('purge_tags')
+ route_table_id = module.params.get('route_table_id')
+ subnets = module.params.get('subnets')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ routes = create_route_spec(connection, module, vpc_id)
+
+ changed = False
+ tags_valid = False
+
+ if lookup == 'tag':
+ if tags is not None:
+ try:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
+ else:
+ route_table = None
+ elif lookup == 'id':
+ try:
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")
+
+ # If no route table returned then create new route table
+ if route_table is None:
+ changed = True
+ if not module.check_mode:
+ try:
+ route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable']
+ # try to wait for route table to be present before moving on
+ get_waiter(
+ connection, 'route_table_exists'
+ ).wait(
+ RouteTableIds=[route_table['RouteTableId']],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error creating route table")
+ else:
+ route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
+ module.exit_json(changed=changed, route_table=route_table)
+
+ if routes is not None:
+ result = ensure_routes(connection=connection, module=module, route_table=route_table,
+ route_specs=routes, propagating_vgw_ids=propagating_vgw_ids,
+ check_mode=module.check_mode, purge_routes=purge_routes)
+ changed = changed or result['changed']
+
+ if propagating_vgw_ids is not None:
+ result = ensure_propagation(connection=connection, module=module, route_table=route_table,
+ propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode)
+ changed = changed or result['changed']
+
+ if not tags_valid and tags is not None:
+ result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags,
+ purge_tags=purge_tags, check_mode=module.check_mode)
+ route_table['Tags'] = result['tags']
+ changed = changed or result['changed']
+
+ if subnets is not None:
+ associated_subnets = find_subnets(connection, module, vpc_id, subnets)
+
+ result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=associated_subnets, check_mode=module.check_mode,
+ purge_subnets=purge_subnets)
+ changed = changed or result['changed']
+
+ if changed:
+ # pause to allow route table routes/subnets/associations to be updated before exiting with final state
+ sleep(5)
+ module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+
+def main():
+ argument_spec = dict(
+ lookup=dict(default='tag', choices=['tag', 'id']),
+ propagating_vgw_ids=dict(type='list', elements='str'),
+ purge_routes=dict(default=True, type='bool'),
+ purge_subnets=dict(default=True, type='bool'),
+ purge_tags=dict(default=False, type='bool'),
+ route_table_id=dict(),
+ routes=dict(default=[], type='list', elements='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ subnets=dict(type='list', elements='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ vpc_id=dict()
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['lookup', 'id', ['route_table_id']],
+ ['lookup', 'tag', ['vpc_id']],
+ ['state', 'present', ['vpc_id']]],
+ supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ result = ensure_route_table_present(connection, module)
+ elif state == 'absent':
+ result = ensure_route_table_absent(connection, module)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_facts.py
new file mode 100644
index 00000000..9ff9959c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_facts.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPC route tables in AWS
+description:
+ - Gather information about EC2 VPC route tables in AWS.
+ - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all VPC route tables
+ community.aws.ec2_vpc_route_table_info:
+
+- name: Gather information about a particular VPC route table using route table ID
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ route-table-id: rtb-00112233
+
+- name: Gather information about any VPC route table with a tag key Name and value Example
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ vpc-id: vpc-abcdef00
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+def get_route_table_info(route_table):
+
+ # Add any routes to array
+ routes = []
+ associations = []
+ for route in route_table.routes:
+ routes.append(route.__dict__)
+ for association in route_table.associations:
+ associations.append(association.__dict__)
+
+ route_table_info = {'id': route_table.id,
+ 'routes': routes,
+ 'associations': associations,
+ 'tags': route_table.tags,
+ 'vpc_id': route_table.vpc_id
+ }
+
+ return route_table_info
+
+
+def list_ec2_vpc_route_tables(connection, module):
+
+ filters = module.params.get("filters")
+ route_table_dict_array = []
+
+ try:
+ all_route_tables = connection.get_all_route_tables(filters=filters)
+ except BotoServerError as e:
+ module.fail_json_aws(e, msg="Failed to get route tables")
+
+ for route_table in all_route_tables:
+ route_table_dict_array.append(get_route_table_info(route_table))
+
+ module.exit_json(route_tables=route_table_dict_array)
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=None, type='dict'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'ec2_vpc_route_table_facts':
+ module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpc_route_tables(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_info.py
new file mode 100644
index 00000000..9ff9959c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_route_table_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPC route tables in AWS
+description:
+ - Gather information about EC2 VPC route tables in AWS.
+ - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all VPC route tables
+ community.aws.ec2_vpc_route_table_info:
+
+- name: Gather information about a particular VPC route table using route table ID
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ route-table-id: rtb-00112233
+
+- name: Gather information about any VPC route table with a tag key Name and value Example
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00
+ community.aws.ec2_vpc_route_table_info:
+ filters:
+ vpc-id: vpc-abcdef00
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+def get_route_table_info(route_table):
+
+ # Add any routes to array
+ routes = []
+ associations = []
+ for route in route_table.routes:
+ routes.append(route.__dict__)
+ for association in route_table.associations:
+ associations.append(association.__dict__)
+
+ route_table_info = {'id': route_table.id,
+ 'routes': routes,
+ 'associations': associations,
+ 'tags': route_table.tags,
+ 'vpc_id': route_table.vpc_id
+ }
+
+ return route_table_info
+
+
+def list_ec2_vpc_route_tables(connection, module):
+
+ filters = module.params.get("filters")
+ route_table_dict_array = []
+
+ try:
+ all_route_tables = connection.get_all_route_tables(filters=filters)
+ except BotoServerError as e:
+ module.fail_json_aws(e, msg="Failed to get route tables")
+
+ for route_table in all_route_tables:
+ route_table_dict_array.append(get_route_table_info(route_table))
+
+ module.exit_json(route_tables=route_table_dict_array)
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=None, type='dict'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'ec2_vpc_route_table_facts':
+ module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_vpc_route_tables(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
new file mode 100644
index 00000000..2f8702ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_vgw
+short_description: Create and delete AWS VPN Virtual Gateways
+version_added: 1.0.0
+description:
+ - Creates AWS VPN Virtual Gateways.
+ - Deletes AWS VPN Virtual Gateways.
+ - Attaches Virtual Gateways to VPCs.
+ - Detaches Virtual Gateways from VPCs.
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - present to ensure the resource is created.
+ - absent to remove the resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the VGW to be created or deleted.
+ type: str
+ type:
+ description:
+ - Type of the virtual gateway to be created.
+ choices: [ "ipsec.1" ]
+ default: "ipsec.1"
+ type: str
+ vpn_gateway_id:
+ description:
+ - VPN gateway ID of an existing virtual gateway.
+ type: str
+ vpc_id:
+ description:
+ - The ID of the VPC to attach or detach.
+ type: str
+ asn:
+ description:
+ - The BGP ASN on the Amazon side.
+ type: int
+ wait_timeout:
+ description:
+ - Number of seconds to wait for status during VPC attach and detach.
+ default: 320
+ type: int
+ tags:
+ description:
+ - Dictionary of resource tags.
+ aliases: [ "resource_tags" ]
+ type: dict
+author: Nick Aslanidis (@naslanidis)
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+EXAMPLES = '''
+- name: Create a new vgw attached to a specific VPC
+ community.aws.ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ vpc_id: vpc-12345678
+ name: personal-testing
+ type: ipsec.1
+ register: created_vgw
+
+- name: Create a new unattached vgw
+ community.aws.ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ tags:
+ environment: production
+ owner: ABC
+ register: created_vgw
+
+- name: Remove a vgw using the name
+ community.aws.ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ register: deleted_vgw
+
+- name: Remove a vgw using the vpn_gateway_id
+ community.aws.ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ vpn_gateway_id: vgw-3a9aa123
+ register: deleted_vgw
+'''
+
+RETURN = '''
+result:
+ description: The result of the create or delete action.
+ returned: success
+ type: dict
+'''
+
+import time
+import traceback
+
+try:
+ import botocore
+ import boto3
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def get_vgw_info(vgws):
+ if not isinstance(vgws, list):
+ return
+
+ for vgw in vgws:
+ vgw_info = {
+ 'id': vgw['VpnGatewayId'],
+ 'type': vgw['Type'],
+ 'state': vgw['State'],
+ 'vpc_id': None,
+ 'tags': dict()
+ }
+
+ for tag in vgw['Tags']:
+ vgw_info['tags'][tag['Key']] = tag['Value']
+
+ if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
+ vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
+
+ return vgw_info
+
+
+def wait_for_status(client, module, vpn_gateway_id, status):
+ polling_increment_secs = 15
+ max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+ status_achieved = False
+
+ for x in range(0, max_retries):
+ try:
+ response = find_vgw(client, module, vpn_gateway_id)
+ if response[0]['VpcAttachments'][0]['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return status_achieved, result
+
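wait_for_status() above follows the standard bounded-polling pattern: wait_timeout divided by the polling increment gives the retry budget. Reduced to its essentials (a sketch; wait_for and check are hypothetical names):

import time

def wait_for(check, target, timeout=320, interval=15):
    # Poll check() every interval seconds until it returns target or the budget is spent.
    for _ in range(timeout // interval):
        if check() == target:
            return True
        time.sleep(interval)
    return False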
+
+def attach_vgw(client, module, vpn_gateway_id):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ try:
+ # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
+ # as available several seconds before actually permitting a new attachment.
+ # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185
+ response = AWSRetry.jittered_backoff(retries=5,
+ catch_extra_error_codes=['InvalidParameterValue']
+ )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id,
+ VpcId=params['VpcId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ if vpc_id:
+ try:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def create_vgw(client, module):
+ params = dict()
+ params['Type'] = module.params.get('type')
+ if module.params.get('asn'):
+ params['AmazonSideAsn'] = module.params.get('asn')
+
+ try:
+ response = client.create_vpn_gateway(**params)
+ get_waiter(
+ client, 'vpn_gateway_exists'
+ ).wait(
+ VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']]
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']),
+ exception=traceback.format_exc())
+ except is_boto3_error_code('VpnGatewayLimitExceeded'):
+ module.fail_json(msg="Too many VPN gateways exist in this account.", exception=traceback.format_exc())
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return result
+
+
+def delete_vgw(client, module, vpn_gateway_id):
+
+ try:
+ response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ # return the deleted VpnGatewayId as this is not included in the above response
+ result = vpn_gateway_id
+ return result
+
+
+def create_tags(client, module, vpn_gateway_id):
+ params = dict()
+
+ try:
+ response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return result
+
+
+def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
+ try:
+ if tags_to_delete:
+ response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
+ else:
+ response = client.delete_tags(Resources=[vpn_gateway_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ result = response
+ return result
+
+
+def load_tags(module):
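+ """Build a boto3-style tag list from the module's tags parameter, always including a Name tag."""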
+ tags = []
+
+ if module.params.get('tags'):
+ for name, value in module.params.get('tags').items():
+ tags.append({'Key': name, 'Value': str(value)})
+ tags.append({'Key': 'Name', 'Value': module.params.get('name')})
+ return tags
+
+
+def find_tags(client, module, resource_id=None):
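+ """Describe the tags on the given resource; returns None when no resource_id is supplied."""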
+
+ response = None
+
+ if resource_id:
+ try:
+ response = client.describe_tags(Filters=[
+ {'Name': 'resource-id', 'Values': [resource_id]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ return response
+
+
+def check_tags(client, module, existing_vgw, vpn_gateway_id):
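+ """Reconcile the tags on an existing virtual gateway with the module's tags parameter and report whether anything changed."""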
+ params = dict()
+ params['Tags'] = module.params.get('tags')
+ vgw = existing_vgw
+ changed = False
+ tags_list = {}
+
+ # format tags for comparison
+ for tags in existing_vgw[0]['Tags']:
+ if tags['Key'] != 'Name':
+ tags_list[tags['Key']] = tags['Value']
+
+ # if existing tags don't match the tags arg, delete existing and recreate with new list
+ if params['Tags'] is not None and tags_list != params['Tags']:
+ delete_tags(client, module, vpn_gateway_id)
+ create_tags(client, module, vpn_gateway_id)
+ vgw = find_vgw(client, module)
+ changed = True
+
+ # if no tag args are supplied, delete any existing tags with the exception of the name tag
+ if params['Tags'] is None and tags_list != {}:
+ tags_to_delete = []
+ for tags in existing_vgw[0]['Tags']:
+ if tags['Key'] != 'Name':
+ tags_to_delete.append(tags)
+
+ delete_tags(client, module, vpn_gateway_id, tags_to_delete)
+ vgw = find_vgw(client, module)
+ changed = True
+
+ return vgw, changed
+
+
+def find_vpc(client, module):
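+ """Describe the VPC named by the module's vpc_id parameter; returns None when no vpc_id is supplied."""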
+ params = dict()
+ params['vpc_id'] = module.params.get('vpc_id')
+ response = None
+
+ if params['vpc_id']:
+ try:
+ response = client.describe_vpcs(VpcIds=[params['vpc_id']])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ return response
+
+
+def find_vgw(client, module, vpn_gateway_id=None):
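+ """Return virtual gateways matched either by the given IDs or by the module's type and name,
+ restricted to pending/available gateways when state=present, sorted by VpnGatewayId."""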
+ params = dict()
+ if vpn_gateway_id:
+ params['VpnGatewayIds'] = vpn_gateway_id
+ else:
+ params['Filters'] = [
+ {'Name': 'type', 'Values': [module.params.get('type')]},
+ {'Name': 'tag:Name', 'Values': [module.params.get('name')]},
+ ]
+ if module.params.get('state') == 'present':
+ params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']})
+ try:
+ response = client.describe_vpn_gateways(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId'])
+
+
+def ensure_vgw_present(client, module):
+
+ # If an existing vgw name and type matches our args, then a match is considered to have been
+ # found and we will not create another vgw.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # check that the vpc_id exists. If not, an exception is thrown
+ if params['VpcId']:
+ vpc = find_vpc(client, module)
+
+ # check if a gateway matching our module args already exists
+ existing_vgw = find_vgw(client, module)
+
+ if existing_vgw != []:
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
+
+ # if a vpc_id was provided, check if it exists and if it's attached
+ if params['VpcId']:
+
+ current_vpc_attachments = existing_vgw[0]['VpcAttachments']
+
+ if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
+ if current_vpc_attachments[0]['VpcId'] != params['VpcId']:
+ # detach the existing vpc from the virtual gateway
+ vpc_to_detach = current_vpc_attachments[0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id])
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+ else:
+ # attach the vgw to the supplied vpc
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ # if no vpc_id was provided, check whether the vgw is attached to a vpc and, if so, detach it
+ else:
+ existing_vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ if existing_vgw[0]['VpcAttachments'] != []:
+ if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ # detach the vpc from the vgw
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ else:
+ # create a new vgw
+ new_vgw = create_vgw(client, module)
+ changed = True
+ vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
+
+ # tag the new virtual gateway
+ create_tags(client, module, vpn_gateway_id)
+
+ # if a vpc-id was supplied, attempt to attach it to the vgw
+ if params['VpcId']:
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ # return current state of the vgw
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+ result = get_vgw_info(vgw)
+ return changed, result
+
+
+def ensure_vgw_absent(client, module):
+
+ # If an existing vgw name and type matches our args, then a match is considered to have been
+ # found and we will take steps to delete it.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # check if a gateway matching our module args already exists
+ if params['VpnGatewayIds']:
+ existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
+ if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
+ existing_vgw = existing_vgw_with_id
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # no VPCs are attached, so attempt to delete the vgw
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = "Nothing to do"
+
+ else:
+ # Check that name and type arguments have been supplied if no vgw-id was given
+ if not module.params.get('name') or not module.params.get('type'):
+ module.fail_json(msg='A name and type are required when no vgw-id is supplied and state is \'absent\'')
+
+ existing_vgw = find_vgw(client, module)
+ if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
+
+ # now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ # now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+
+ else:
+ # no VPCs are attached, so attempt to delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = None
+
+ result = deleted_vgw
+ return changed, result
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(),
+ vpn_gateway_id=dict(),
+ vpc_id=dict(),
+ asn=dict(type='int'),
+ wait_timeout=dict(type='int', default=320),
+ type=dict(default='ipsec.1', choices=['ipsec.1']),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['name']]])
+
+ state = module.params.get('state').lower()
+
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ (changed, results) = ensure_vgw_present(client, module)
+ else:
+ (changed, results) = ensure_vgw_absent(client, module)
+ module.exit_json(changed=changed, vgw=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_facts.py
new file mode 100644
index 00000000..692c291a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_facts.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vgw_info
+version_added: 1.0.0
+short_description: Gather information about virtual gateways in AWS
+description:
+ - Gather information about virtual gateways in AWS.
+ - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters.
+ type: dict
+ vpn_gateway_ids:
+ description:
+ - Get details of specific Virtual Gateways by passing their IDs. This value should be provided as a list.
+ type: list
+ elements: str
+author: "Nick Aslanidis (@naslanidis)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all virtual gateways for an account or profile
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ register: vgw_info
+
+- name: Gather information about a filtered list of Virtual Gateways
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "main-virt-gateway"
+ register: vgw_info
+
+- name: Gather information about a specific virtual gateway by VpnGatewayIds
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ vpn_gateway_ids:
+ - vgw-c432f6a7
+ register: vgw_info
+'''
+
+RETURN = r'''
+virtual_gateways:
+ description: The virtual gateways for the account.
+ returned: always
+ type: list
+ sample: [
+ {
+ "state": "available",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "TEST-VGW"
+ }
+ ],
+ "type": "ipsec.1",
+ "vpc_attachments": [
+ {
+ "state": "attached",
+ "vpc_id": "vpc-22a93c74"
+ }
+ ],
+ "vpn_gateway_id": "vgw-23e3d64e"
+ }
+ ]
+
+changed:
+ description: Always C(false), as listing virtual gateways does not modify any state.
+ returned: always
+ type: bool
+ sample: false
+'''
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def get_virtual_gateway_info(virtual_gateway):
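+ """Pick out the relevant fields of a raw virtual gateway description."""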
+ virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'],
+ 'State': virtual_gateway['State'],
+ 'Type': virtual_gateway['Type'],
+ 'VpcAttachments': virtual_gateway['VpcAttachments'],
+ 'Tags': virtual_gateway.get('Tags', [])}
+ return virtual_gateway_info
+
+
+def list_virtual_gateways(client, module):
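+ """Describe the virtual gateways matching the module's filters and/or IDs, returned as snake_cased dicts."""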
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['DryRun'] = module.check_mode
+
+ if module.params.get("vpn_gateway_ids"):
+ params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids")
+
+ try:
+ all_virtual_gateways = client.describe_vpn_gateways(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
+ for vgw in all_virtual_gateways['VpnGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ vpn_gateway_ids=dict(type='list', default=None, elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_vgw_facts':
+ module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = list_virtual_gateways(connection, module)
+
+ module.exit_json(virtual_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
new file mode 100644
index 00000000..692c291a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vgw_info
+version_added: 1.0.0
+short_description: Gather information about virtual gateways in AWS
+description:
+ - Gather information about virtual gateways in AWS.
+ - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters.
+ type: dict
+ vpn_gateway_ids:
+ description:
+ - Get details of specific Virtual Gateways by passing their IDs. This value should be provided as a list.
+ type: list
+ elements: str
+author: "Nick Aslanidis (@naslanidis)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all virtual gateways for an account or profile
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ register: vgw_info
+
+- name: Gather information about a filtered list of Virtual Gateways
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "main-virt-gateway"
+ register: vgw_info
+
+- name: Gather information about a specific virtual gateway by VpnGatewayIds
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ vpn_gateway_ids:
+ - vgw-c432f6a7
+ register: vgw_info
+'''
+
+RETURN = r'''
+virtual_gateways:
+ description: The virtual gateways for the account.
+ returned: always
+ type: list
+ sample: [
+ {
+ "state": "available",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "TEST-VGW"
+ }
+ ],
+ "type": "ipsec.1",
+ "vpc_attachments": [
+ {
+ "state": "attached",
+ "vpc_id": "vpc-22a93c74"
+ }
+ ],
+ "vpn_gateway_id": "vgw-23e3d64e"
+ }
+ ]
+
+changed:
+ description: Always C(false), as listing virtual gateways does not modify any state.
+ returned: always
+ type: bool
+ sample: false
+'''
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def get_virtual_gateway_info(virtual_gateway):
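+ """Pick out the relevant fields of a raw virtual gateway description."""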
+ virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'],
+ 'State': virtual_gateway['State'],
+ 'Type': virtual_gateway['Type'],
+ 'VpcAttachments': virtual_gateway['VpcAttachments'],
+ 'Tags': virtual_gateway.get('Tags', [])}
+ return virtual_gateway_info
+
+
+def list_virtual_gateways(client, module):
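+ """Describe the virtual gateways matching the module's filters and/or IDs, returned as snake_cased dicts."""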
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['DryRun'] = module.check_mode
+
+ if module.params.get("vpn_gateway_ids"):
+ params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids")
+
+ try:
+ all_virtual_gateways = client.describe_vpn_gateways(**params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
+ for vgw in all_virtual_gateways['VpnGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ vpn_gateway_ids=dict(type='list', default=None, elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_vgw_facts':
+ module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = list_virtual_gateways(connection, module)
+
+ module.exit_json(virtual_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
new file mode 100644
index 00000000..6e18e724
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
@@ -0,0 +1,778 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vpn
+version_added: 1.0.0
+short_description: Create, modify, and delete EC2 VPN connections
+description:
+ - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters
+ option or specifying the VPN connection identifier.
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+requirements: ['boto3', 'botocore']
+author: "Sloane Hertel (@s-hertel)"
+options:
+ state:
+ description:
+ - The desired state of the VPN connection.
+ choices: ['present', 'absent']
+ default: present
+ required: no
+ type: str
+ customer_gateway_id:
+ description:
+ - The ID of the customer gateway.
+ type: str
+ connection_type:
+ description:
+ - The type of VPN connection.
+ - At this time only C(ipsec.1) is supported.
+ default: ipsec.1
+ type: str
+ vpn_gateway_id:
+ description:
+ - The ID of the virtual private gateway.
+ type: str
+ vpn_connection_id:
+ description:
+ - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match.
+ type: str
+ tags:
+ description:
+ - Tags to attach to the VPN connection.
+ type: dict
+ purge_tags:
+ description:
+ - Whether or not to delete VPN connection tags that are associated with the connection but not specified in the task.
+ type: bool
+ default: false
+ static_only:
+ description:
+ - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.
+ default: False
+ type: bool
+ required: no
+ tunnel_options:
+ description:
+ - An optional list of up to two dicts, each of which may contain I(TunnelInsideCidr)
+ and/or I(PreSharedKey) keys with appropriate string values. AWS defaults apply for any key
+ that is not supplied.
+ required: no
+ type: list
+ elements: dict
+ suboptions:
+ TunnelInsideCidr:
+ type: str
+ description: The range of inside IP addresses for the tunnel.
+ PreSharedKey:
+ type: str
+ description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.
+ filters:
+ description:
+ - An alternative to using I(vpn_connection_id). If multiple matches are found, I(vpn_connection_id) is required.
+ If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN
+ that correlates. For example, if the filter I(cidr) is C(['194.168.2.0/24', '192.168.2.0/24']) and the VPN route only has the
+ destination cidr block of C(192.168.2.0/24), it will be found with this filter (assuming there are not multiple
+ VPNs that are matched). As another example, if the filter I(vpn) is equal to C(['vpn-ccf7e7ad', 'vpn-cb0ae2a2']) and one
+ of the VPNs has the state deleted (exists but is unmodifiable) and the other exists and is not deleted,
+ it will be found via this filter. See examples.
+ suboptions:
+ cgw-config:
+ description:
+ - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings.
+ static-routes-only:
+ description:
+ - The type of routing; C(true) or C(false).
+ cidr:
+ description:
+ - The destination cidr of the VPN's route as a string or a list of those strings.
+ bgp:
+ description:
+ - The BGP ASN number associated with a BGP device. Only works if the connection is attached.
+ This filtering option is currently not working.
+ vpn:
+ description:
+ - The VPN connection id as a string or a list of those strings.
+ vgw:
+ description:
+ - The virtual private gateway as a string or a list of those strings.
+ tag-keys:
+ description:
+ - The key of a tag as a string or a list of those strings.
+ tag-values:
+ description:
+ - The value of a tag as a string or a list of those strings.
+ tags:
+ description:
+ - A dict of key value pairs.
+ cgw:
+ description:
+ - The customer gateway id as a string or a list of those strings.
+ type: dict
+ routes:
+ description:
+ - Routes to add to the connection.
+ type: list
+ elements: str
+ purge_routes:
+ description:
+ - Whether or not to delete VPN connection routes that are not specified in the task.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long, in seconds, before wait gives up.
+ default: 600
+ type: int
+ required: false
+ delay:
+ description:
+ - The time, in seconds, to wait before checking operation again.
+ required: false
+ type: int
+ default: 15
+'''
+
+EXAMPLES = r"""
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: create a VPN connection
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_gateway_id: vgw-XXXXXXXX
+ customer_gateway_id: cgw-XXXXXXXX
+
+- name: modify VPN connection tags
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_connection_id: vpn-XXXXXXXX
+ tags:
+ Name: ansible-tag-1
+ Other: ansible-tag-2
+
+- name: delete a connection
+ community.aws.ec2_vpc_vpn:
+ vpn_connection_id: vpn-XXXXXXXX
+ state: absent
+
+- name: modify VPN tags (identifying VPN by filters)
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ cidr: 194.168.1.0/24
+ tag-keys:
+ - Ansible
+ - Other
+ tags:
+ New: Tag
+ purge_tags: true
+ static_only: true
+
+- name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ vpn: vpn-XXXXXXXX
+ static_only: true
+ tunnel_options:
+ -
+ TunnelInsideCidr: '169.254.100.1/30'
+ -
+ TunnelInsideCidr: '169.254.100.5/30'
+
+- name: add routes and remove any preexisting ones
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ vpn: vpn-XXXXXXXX
+ routes:
+ - 195.168.2.0/24
+ - 196.168.2.0/24
+ purge_routes: true
+
+- name: remove all routes
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_connection_id: vpn-XXXXXXXX
+ routes: []
+ purge_routes: true
+
+- name: delete a VPN identified by filters
+ community.aws.ec2_vpc_vpn:
+ state: absent
+ filters:
+ tags:
+ Ansible: Tag
+"""
+
+RETURN = r"""
+changed:
+ description: If the VPN connection has changed.
+ type: bool
+ returned: always
+ sample:
+ changed: true
+customer_gateway_configuration:
+ description: The configuration of the VPN connection.
+ returned: I(state=present)
+ type: str
+customer_gateway_id:
+ description: The customer gateway connected via the connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ customer_gateway_id: cgw-1220c87b
+vpn_gateway_id:
+ description: The virtual private gateway connected via the connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpn_gateway_id: vgw-cb0ae2a2
+options:
+ description: The VPN connection options (currently only containing static_routes_only).
+ type: complex
+ returned: I(state=present)
+ contains:
+ static_routes_only:
+ description: If the VPN connection only allows static routes.
+ returned: I(state=present)
+ type: str
+ sample:
+ static_routes_only: true
+routes:
+ description: The routes of the VPN connection.
+ type: list
+ returned: I(state=present)
+ sample:
+ routes: [{
+ 'destination_cidr_block': '192.168.1.0/24',
+ 'state': 'available'
+ }]
+state:
+ description: The status of the VPN connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ state: available
+tags:
+ description: The tags associated with the connection.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ name: ansible-test
+ other: tag
+type:
+ description: The type of VPN connection (currently only ipsec.1 is available).
+ type: str
+ returned: I(state=present)
+ sample:
+ type: "ipsec.1"
+vgw_telemetry:
+ type: list
+ returned: I(state=present)
+ description: The telemetry for the VPN tunnel.
+ sample:
+ vgw_telemetry: [{
+ 'outside_ip_address': 'string',
+ 'status': 'up',
+ 'last_status_change': 'datetime(2015, 1, 1)',
+ 'status_message': 'string',
+ 'accepted_route_count': 123
+ }]
+vpn_connection_id:
+ description: The identifier for the VPN connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpn_connection_id: vpn-781e0e19
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict,
+ compare_aws_tags,
+ ansible_dict_to_boto3_tag_list,
+)
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+class VPNConnectionException(Exception):
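+ """Raised when a VPN connection lookup or modification fails; wraps the underlying botocore exception when one is available."""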
+ def __init__(self, msg, exception=None):
+ super(VPNConnectionException, self).__init__(msg)
+ self.msg = msg
+ self.exception = exception
+
+
+def find_connection(connection, module_params, vpn_connection_id=None):
+ ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None,
+ or raise an error if there were multiple viable connections. '''
+
+ filters = module_params.get('filters')
+
+ # vpn_connection_id may be provided via module option; takes precedence over any filter values
+ if not vpn_connection_id and module_params.get('vpn_connection_id'):
+ vpn_connection_id = module_params.get('vpn_connection_id')
+
+ if not isinstance(vpn_connection_id, list) and vpn_connection_id:
+ vpn_connection_id = [to_text(vpn_connection_id)]
+ elif isinstance(vpn_connection_id, list):
+ vpn_connection_id = [to_text(connection) for connection in vpn_connection_id]
+
+ formatted_filter = []
+ # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier
+ if not vpn_connection_id:
+ formatted_filter = create_filter(module_params, provided_filters=filters)
+
+ # see if there is a unique matching connection
+ try:
+ if vpn_connection_id:
+ existing_conn = connection.describe_vpn_connections(VpnConnectionIds=vpn_connection_id,
+ Filters=formatted_filter)
+ else:
+ existing_conn = connection.describe_vpn_connections(Filters=formatted_filter)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed while describing VPN connection.",
+ exception=e)
+
+ return find_connection_response(connections=existing_conn)
+
+
+def add_routes(connection, vpn_connection_id, routes_to_add):
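+ """Add each given destination CIDR block as a static route on the VPN connection."""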
+ for route in routes_to_add:
+ try:
+ connection.create_vpn_connection_route(VpnConnectionId=vpn_connection_id,
+ DestinationCidrBlock=route)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id),
+ exception=e)
+
+
+def remove_routes(connection, vpn_connection_id, routes_to_remove):
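+ """Remove each given destination CIDR block from the VPN connection's static routes."""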
+ for route in routes_to_remove:
+ try:
+ connection.delete_vpn_connection_route(VpnConnectionId=vpn_connection_id,
+ DestinationCidrBlock=route)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id),
+ exception=e)
+
+
+def create_filter(module_params, provided_filters):
+ """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """
+ boto3ify_filter = {'cgw-config': 'customer-gateway-configuration',
+ 'static-routes-only': 'option.static-routes-only',
+ 'cidr': 'route.destination-cidr-block',
+ 'bgp': 'bgp-asn',
+ 'vpn': 'vpn-connection-id',
+ 'vgw': 'vpn-gateway-id',
+ 'tag-keys': 'tag-key',
+ 'tag-values': 'tag-value',
+ 'tags': 'tag',
+ 'cgw': 'customer-gateway-id'}
+
+ # unmodifiable options and their filter name counterpart
+ param_to_filter = {"customer_gateway_id": "customer-gateway-id",
+ "vpn_gateway_id": "vpn-gateway-id",
+ "vpn_connection_id": "vpn-connection-id"}
+
+ flat_filter_dict = {}
+ formatted_filter = []
+
+ for raw_param in dict(provided_filters):
+
+ # fix filter names to be recognized by boto3
+ if raw_param in boto3ify_filter:
+ param = boto3ify_filter[raw_param]
+ provided_filters[param] = provided_filters.pop(raw_param)
+ elif raw_param in list(boto3ify_filter.values()):
+ param = raw_param
+ else:
+ raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param))
+
+ # reformat filters with special formats
+ if param == 'tag':
+ for key in provided_filters[param]:
+ formatted_key = 'tag:' + key
+ if isinstance(provided_filters[param][key], list):
+ flat_filter_dict[formatted_key] = str(provided_filters[param][key])
+ else:
+ flat_filter_dict[formatted_key] = [str(provided_filters[param][key])]
+ elif param == 'option.static-routes-only':
+ flat_filter_dict[param] = [str(provided_filters[param]).lower()]
+ else:
+ if isinstance(provided_filters[param], list):
+ flat_filter_dict[param] = provided_filters[param]
+ else:
+ flat_filter_dict[param] = [str(provided_filters[param])]
+
+ # if customer_gateway, vpn_gateway, or vpn_connection was specified in the task but not the filter, add it
+ for param in param_to_filter:
+ if param_to_filter[param] not in flat_filter_dict and module_params.get(param):
+ flat_filter_dict[param_to_filter[param]] = [module_params.get(param)]
+
+ # change the flat dict into something boto3 will understand
+ formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()]
+
+ return formatted_filter
+
+
+def find_connection_response(connections=None):
+ """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
+ returns None if the connection does not exist, raise an error if multiple matches are found. """
+
+ # Found no connections
+ if not connections or 'VpnConnections' not in connections:
+ return None
+
+ # Too many results
+ elif connections and len(connections['VpnConnections']) > 1:
+ viable = []
+ for each in connections['VpnConnections']:
+ # deleted connections are not modifiable
+ if each['State'] not in ("deleted", "deleting"):
+ viable.append(each)
+ if len(viable) == 1:
+ # Found one viable result; return unique match
+ return viable[0]
+ elif len(viable) == 0:
+ # All matching connections were already deleted; treat as not found so a new one can be created
+ return None
+ else:
+ raise VPNConnectionException(msg="More than one matching VPN connection was found. "
+ "To modify or delete a VPN please specify vpn_connection_id or add filters.")
+
+ # Found unique match
+ elif connections and len(connections['VpnConnections']) == 1:
+ # deleted connections are not modifiable
+ if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"):
+ return connections['VpnConnections'][0]
+
+
+def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None):
+ """ Creates a VPN connection """
+
+ options = {'StaticRoutesOnly': static_only}
+ if tunnel_options and len(tunnel_options) <= 2:
+ t_opt = []
+ for m in tunnel_options:
+ # See Boto3 docs regarding 'create_vpn_connection'
+ # tunnel options for allowed 'TunnelOptions' keys.
+ if not isinstance(m, dict):
+ raise TypeError("non-dict list member")
+ t_opt.append(m)
+ if t_opt:
+ options['TunnelOptions'] = t_opt
+
+ if not (customer_gateway_id and vpn_gateway_id):
+ raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide "
+ "both vpn_gateway_id and customer_gateway_id.")
+ try:
+ vpn = connection.create_vpn_connection(Type=connection_type,
+ CustomerGatewayId=customer_gateway_id,
+ VpnGatewayId=vpn_gateway_id,
+ Options=options)
+ connection.get_waiter('vpn_connection_available').wait(
+ VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+ except WaiterError as e:
+ raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']),
+ exception=e)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to create VPN connection",
+ exception=e)
+
+ return vpn['VpnConnection']
+
+
+def delete_connection(connection, vpn_connection_id, delay, max_attempts):
+ """ Deletes a VPN connection """
+ try:
+ connection.delete_vpn_connection(VpnConnectionId=vpn_connection_id)
+ connection.get_waiter('vpn_connection_deleted').wait(
+ VpnConnectionIds=[vpn_connection_id],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+ except WaiterError as e:
+ raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id),
+ exception=e)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id),
+ exception=e)
+
+
+def add_tags(connection, vpn_connection_id, add):
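+ """Add the given boto3-style tag list to the VPN connection."""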
+ try:
+ connection.create_tags(Resources=[vpn_connection_id],
+ Tags=add)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add),
+ exception=e)
+
+
+def remove_tags(connection, vpn_connection_id, remove):
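+ """Remove the given tag keys from the VPN connection."""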
+ # format tags since they are a list in the format ['tag1', 'tag2', 'tag3']
+ key_dict_list = [{'Key': tag} for tag in remove]
+ try:
+ connection.delete_tags(Resources=[vpn_connection_id],
+ Tags=key_dict_list)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove),
+ exception=e)
+
+
+def check_for_update(connection, module_params, vpn_connection_id):
+ """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """
+ tags = module_params.get('tags')
+ routes = module_params.get('routes')
+ purge_tags = module_params.get('purge_tags')
+ purge_routes = module_params.get('purge_routes')
+
+ vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id)
+ current_attrs = camel_dict_to_snake_dict(vpn_connection)
+
+ # Initialize changes dict
+ changes = {'tags_to_add': [],
+ 'tags_to_remove': [],
+ 'routes_to_add': [],
+ 'routes_to_remove': []}
+
+ # Get changes to tags
+ current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value')
+ tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags)
+ changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add)
+ # Get changes to routes
+ if 'Routes' in vpn_connection:
+ current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']]
+ if purge_routes:
+ changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes]
+ changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes]
+
+ # Check if nonmodifiable attributes are attempted to be modified
+ for attribute in current_attrs:
+ if attribute in ("tags", "routes", "state"):
+ continue
+ elif attribute == 'options':
+ will_be = module_params.get('static_only', None)
+ is_now = bool(current_attrs[attribute]['static_routes_only'])
+ attribute = 'static_only'
+ elif attribute == 'type':
+ will_be = module_params.get("connection_type", None)
+ is_now = current_attrs[attribute]
+ else:
+ is_now = current_attrs[attribute]
+ will_be = module_params.get(attribute, None)
+
+ if will_be is not None and to_text(will_be) != to_text(is_now):
+ raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN "
+ "connection attributes are tags and routes. The value you tried to change it to "
+ "is {2}.".format(attribute, is_now, will_be))
+
+ return changes
+
+
+def make_changes(connection, vpn_connection_id, changes):
+ """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove',
+ the values of which are lists (generated by check_for_update()).
+ """
+ changed = False
+
+ if changes['tags_to_add']:
+ changed = True
+ add_tags(connection, vpn_connection_id, changes['tags_to_add'])
+
+ if changes['tags_to_remove']:
+ changed = True
+ remove_tags(connection, vpn_connection_id, changes['tags_to_remove'])
+
+ if changes['routes_to_add']:
+ changed = True
+ add_routes(connection, vpn_connection_id, changes['routes_to_add'])
+
+ if changes['routes_to_remove']:
+ changed = True
+ remove_routes(connection, vpn_connection_id, changes['routes_to_remove'])
+
+ return changed
+
+
+def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None):
+ """ Returns the changes that would be made to a VPN Connection """
+ state = module_params.get('state')
+ if state == 'absent':
+ if vpn_connection_id:
+ return True, {}
+ else:
+ return False, {}
+
+ changed = False
+ results = {'customer_gateway_configuration': '',
+ 'customer_gateway_id': module_params.get('customer_gateway_id'),
+ 'vpn_gateway_id': module_params.get('vpn_gateway_id'),
+ 'options': {'static_routes_only': module_params.get('static_only')},
+ 'routes': [module_params.get('routes')]}
+
+ # get combined current tags and tags to set
+ present_tags = module_params.get('tags')
+ if current_state and 'Tags' in current_state:
+ current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags'])
+ if module_params.get('purge_tags'):
+ if current_tags != present_tags:
+ changed = True
+ elif current_tags != present_tags:
+ if not set(present_tags.keys()) < set(current_tags.keys()):
+ changed = True
+ # add preexisting tags that new tags didn't overwrite
+ present_tags.update((tag, current_tags[tag]) for tag in current_tags if tag not in present_tags)
+ elif current_tags.keys() == present_tags.keys() and set(present_tags.values()) != set(current_tags.values()):
+ changed = True
+ elif module_params.get('tags'):
+ changed = True
+ if present_tags:
+ results['tags'] = present_tags
+
+ # get combined current routes and routes to add
+ present_routes = module_params.get('routes')
+ if current_state and 'Routes' in current_state:
+ current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']]
+ if module_params.get('purge_routes'):
+ if set(current_routes) != set(present_routes):
+ changed = True
+ elif set(present_routes) != set(current_routes):
+ if not set(present_routes) < set(current_routes):
+ changed = True
+ present_routes.extend([route for route in current_routes if route not in present_routes])
+ elif module_params.get('routes'):
+ changed = True
+ results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes]
+
+ # return the vpn_connection_id if it's known
+ if vpn_connection_id:
+ results['vpn_connection_id'] = vpn_connection_id
+ else:
+ changed = True
+ results['vpn_connection_id'] = 'vpn-XXXXXXXX'
+
+ return changed, results
+
+
+def ensure_present(connection, module_params, check_mode=False):
+ """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """
+ vpn_connection = find_connection(connection, module_params)
+ changed = False
+ delay = module_params.get('delay')
+ max_attempts = module_params.get('wait_timeout') // delay
+
+ # No match but vpn_connection_id was specified.
+ if not vpn_connection and module_params.get('vpn_connection_id'):
+ raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?")
+
+ # Unique match was found. Check if attributes provided differ.
+ elif vpn_connection:
+ vpn_connection_id = vpn_connection['VpnConnectionId']
+ # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove
+ changes = check_for_update(connection, module_params, vpn_connection_id)
+ if check_mode:
+ return get_check_mode_results(connection, module_params, vpn_connection_id, current_state=vpn_connection)
+ changed = make_changes(connection, vpn_connection_id, changes)
+
+ # No match was found. Create and tag a connection and add routes.
+ else:
+ changed = True
+ if check_mode:
+ return get_check_mode_results(connection, module_params)
+ vpn_connection = create_connection(connection,
+ customer_gateway_id=module_params.get('customer_gateway_id'),
+ static_only=module_params.get('static_only'),
+ vpn_gateway_id=module_params.get('vpn_gateway_id'),
+ connection_type=module_params.get('connection_type'),
+ tunnel_options=module_params.get('tunnel_options'),
+ max_attempts=max_attempts,
+ delay=delay)
+ changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId'])
+ make_changes(connection, vpn_connection['VpnConnectionId'], changes)
+
+ # get latest version if a change has been made and make tags output nice before returning it
+ if vpn_connection:
+ vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId'])
+ if 'Tags' in vpn_connection:
+ vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags'])
+
+ return changed, vpn_connection
+
+
+def ensure_absent(connection, module_params, check_mode=False):
+ """ Deletes a VPN connection if it exists. """
+ vpn_connection = find_connection(connection, module_params)
+
+ if check_mode:
+ return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None)
+
+ delay = module_params.get('delay')
+ max_attempts = module_params.get('wait_timeout') // delay
+
+ if vpn_connection:
+ delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts)
+ changed = True
+ else:
+ changed = False
+
+ return changed, {}
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ filters=dict(type='dict', default={}),
+ vpn_gateway_id=dict(type='str'),
+ tags=dict(default={}, type='dict'),
+ connection_type=dict(default='ipsec.1', type='str'),
+ tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'),
+ static_only=dict(default=False, type='bool'),
+ customer_gateway_id=dict(type='str'),
+ vpn_connection_id=dict(type='str'),
+ purge_tags=dict(type='bool', default=False),
+ routes=dict(type='list', default=[], elements='str'),
+ purge_routes=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ delay=dict(type='int', default=15),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+ parameters = dict(module.params)
+
+ try:
+ if state == 'present':
+ changed, response = ensure_present(connection, parameters, module.check_mode)
+ elif state == 'absent':
+ changed, response = ensure_absent(connection, parameters, module.check_mode)
+ except VPNConnectionException as e:
+ if e.exception:
+ module.fail_json_aws(e.exception, msg=e.msg)
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_facts.py
new file mode 100644
index 00000000..e96583f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_facts.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vpn_info
+version_added: 1.0.0
+short_description: Gather information about VPN Connections in AWS
+description:
+ - Gather information about VPN Connections in AWS.
+ - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
+ required: false
+ type: dict
+ vpn_connection_ids:
+ description:
+ - Get details of specific VPN connections using their connection IDs. This value should be provided as a list.
+ required: false
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Gather information about all vpn connections
+ community.aws.ec2_vpc_vpn_info:
+
+- name: Gather information about a filtered list of vpn connections, based on tags
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ "tag:Name": test-connection
+ register: vpn_conn_info
+
+- name: Gather information about vpn connections attached to a specific virtual private gateway
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ vpn-gateway-id: vgw-cbe66beb
+ register: vpn_conn_info
+'''
+
+RETURN = r'''
+vpn_connections:
+ description: List of one or more VPN Connections.
+ returned: always
+ type: complex
+ contains:
+ category:
+ description: The category of the VPN connection.
+ returned: always
+ type: str
+ sample: VPN
+ customer_gateway_configuration:
+ description: The configuration information for the VPN connection's customer gateway (in the native XML format).
+ returned: always
+ type: str
+ customer_gateway_id:
+ description: The ID of the customer gateway at your end of the VPN connection.
+ returned: always
+ type: str
+ sample: cgw-17a53c37
+ options:
+ description: The VPN connection options.
+ returned: always
+ type: dict
+ sample: {
+ "static_routes_only": false
+ }
+ routes:
+ description: List of static routes associated with the VPN connection.
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: The CIDR block associated with the local subnet of the customer data center.
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ state:
+ description: The current state of the static route.
+ returned: always
+ type: str
+ sample: available
+ state:
+ description: The current state of the VPN connection.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the VPN connection.
+ returned: always
+ type: dict
+ sample: {
+ "Name": "test-conn"
+ }
+ type:
+ description: The type of VPN connection.
+ returned: always
+ type: str
+ sample: ipsec.1
+ vgw_telemetry:
+ description: Information about the VPN tunnel.
+ returned: always
+ type: complex
+ contains:
+ accepted_route_count:
+ description: The number of accepted routes.
+ returned: always
+ type: int
+ sample: 0
+ last_status_change:
+ description: The date and time of the last change in status.
+ returned: always
+ type: str
+ sample: "2018-02-09T14:35:27+00:00"
+ outside_ip_address:
+ description: The Internet-routable IP address of the virtual private gateway's outside interface.
+ returned: always
+ type: str
+ sample: 13.127.79.191
+ status:
+ description: The status of the VPN tunnel.
+ returned: always
+ type: str
+ sample: DOWN
+ status_message:
+ description: If an error occurs, a description of the error.
+ returned: always
+ type: str
+ sample: IPSEC IS DOWN
+ certificate_arn:
+ description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
+ returned: when a private certificate is used for authentication
+ type: str
+ sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
+ vpn_connection_id:
+ description: The ID of the VPN connection.
+ returned: always
+ type: str
+ sample: vpn-f700d5c0
+ vpn_gateway_id:
+ description: The ID of the virtual private gateway at the AWS side of the VPN connection.
+ returned: always
+ type: str
+ sample: vgw-cbe56bfb
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
+def date_handler(obj):
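+ """Serialize datetime values to ISO 8601 strings so the API response can round-trip through JSON."""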
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_vpn_connections(connection, module):
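+ """Describe the matching VPN connections and exit the module with the snake_cased, JSON-safe results."""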
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Cannot validate JSON data")
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
+ if snaked_vpn_connections:
+ for vpn_connection in snaked_vpn_connections:
+ vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
+ module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
+
+
+def main():
+
+ argument_spec = dict(
+ vpn_connection_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['vpn_connection_ids', 'filters']],
+ supports_check_mode=True)
+ if module._module._name == 'ec2_vpc_vpn_facts':
+ module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ list_vpn_connections(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
new file mode 100644
index 00000000..e96583f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vpn_info
+version_added: 1.0.0
+short_description: Gather information about VPN Connections in AWS
+description:
+ - Gather information about VPN Connections in AWS.
+ - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
+ required: false
+ type: dict
+ vpn_connection_ids:
+ description:
+ - Get details of specific VPN connections using their connection IDs. This value should be provided as a list.
+ required: false
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Gather information about all vpn connections
+ community.aws.ec2_vpc_vpn_info:
+
+- name: Gather information about a filtered list of vpn connections, based on tags
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ "tag:Name": test-connection
+ register: vpn_conn_info
+
+- name: Gather information about vpn connections attached to a specific virtual private gateway
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ vpn-gateway-id: vgw-cbe66beb
+ register: vpn_conn_info
+'''
+
+RETURN = r'''
+vpn_connections:
+ description: List of one or more VPN Connections.
+ returned: always
+ type: complex
+ contains:
+ category:
+ description: The category of the VPN connection.
+ returned: always
+ type: str
+ sample: VPN
+ customer_gateway_configuration:
+ description: The configuration information for the VPN connection's customer gateway (in the native XML format).
+ returned: always
+ type: str
+ customer_gateway_id:
+ description: The ID of the customer gateway at your end of the VPN connection.
+ returned: always
+ type: str
+ sample: cgw-17a53c37
+ options:
+ description: The VPN connection options.
+ returned: always
+ type: dict
+ sample: {
+ "static_routes_only": false
+ }
+ routes:
+ description: List of static routes associated with the VPN connection.
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: The CIDR block associated with the local subnet of the customer data center.
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ state:
+ description: The current state of the static route.
+ returned: always
+ type: str
+ sample: available
+ state:
+ description: The current state of the VPN connection.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the VPN connection.
+ returned: always
+ type: dict
+ sample: {
+ "Name": "test-conn"
+ }
+ type:
+ description: The type of VPN connection.
+ returned: always
+ type: str
+ sample: ipsec.1
+ vgw_telemetry:
+ description: Information about the VPN tunnel.
+ returned: always
+ type: complex
+ contains:
+ accepted_route_count:
+ description: The number of accepted routes.
+ returned: always
+ type: int
+ sample: 0
+ last_status_change:
+ description: The date and time of the last change in status.
+ returned: always
+ type: str
+ sample: "2018-02-09T14:35:27+00:00"
+ outside_ip_address:
+ description: The Internet-routable IP address of the virtual private gateway's outside interface.
+ returned: always
+ type: str
+ sample: 13.127.79.191
+ status:
+ description: The status of the VPN tunnel.
+ returned: always
+ type: str
+ sample: DOWN
+ status_message:
+ description: If an error occurs, a description of the error.
+ returned: always
+ type: str
+ sample: IPSEC IS DOWN
+ certificate_arn:
+ description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
+ returned: when a private certificate is used for authentication
+ type: str
+ sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
+ vpn_connection_id:
+ description: The ID of the VPN connection.
+ returned: always
+ type: str
+ sample: vpn-f700d5c0
+ vpn_gateway_id:
+ description: The ID of the virtual private gateway at the AWS side of the VPN connection.
+ returned: always
+ type: str
+ sample: vgw-cbe56bfb
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
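+# Helper: render datetime objects as ISO 8601 strings so the boto3 response can be JSON-serialized.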
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_vpn_connections(connection, module):
+ params = dict()
+
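+    # At most one of these is non-empty: the module declares filters and vpn_connection_ids mutually exclusive.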
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Cannot validate JSON data")
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
+ if snaked_vpn_connections:
+ for vpn_connection in snaked_vpn_connections:
+ vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
+ module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
+
+
+def main():
+
+ argument_spec = dict(
+ vpn_connection_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['vpn_connection_ids', 'filters']],
+ supports_check_mode=True)
+ if module._module._name == 'ec2_vpc_vpn_facts':
+ module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('ec2')
+
+ list_vpn_connections(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_win_password.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
new file mode 100644
index 00000000..ed06f705
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_win_password
+version_added: 1.0.0
+short_description: Gets the default administrator password for EC2 Windows instances
+description:
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
+ - This module has a dependency on python-boto.
+author: "Rick Mendes (@rickmendes)"
+options:
+ instance_id:
+ description:
+ - The instance id to get the password data from.
+ required: true
+ type: str
+ key_file:
+ description:
+ - Path to the file containing the key pair used on the instance.
+ - Conflicts with I(key_data).
+ required: false
+ type: path
+ key_data:
+ description:
+ - The private key (usually stored in vault).
+      - Conflicts with I(key_file).
+ required: false
+ type: str
+ key_passphrase:
+ description:
+      - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
+        convert your password-protected keys if they do not use DES or 3DES, for example C(openssl rsa -in current_key -out new_key -des3).
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the password to be available before returning.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - Number of seconds to wait before giving up.
+ default: 120
+ type: int
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+requirements:
+ - cryptography
+
+notes:
+ - As of Ansible 2.4, this module requires the python cryptography module rather than the
+ older pycrypto module.
+'''
+
+EXAMPLES = '''
+# Example of getting a password
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+
+# Example of getting a password using a variable
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_data: "{{ ec2_private_key }}"
+
+# Example of getting a password with a password protected key
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_protected_test_key.pem"
+ key_passphrase: "secret"
+
+# Example of waiting for a password
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+ wait: yes
+ wait_timeout: 45
+'''
+
+import datetime
+import time
+from base64 import b64decode
+
+try:
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect
+
+
+def setup_module_object():
+ argument_spec = dict(
+ instance_id=dict(required=True),
+ key_file=dict(required=False, default=None, type='path'),
+ key_passphrase=dict(no_log=True, default=None, required=False),
+ key_data=dict(no_log=True, default=None, required=False),
+ wait=dict(type='bool', default=False, required=False),
+ wait_timeout=dict(default=120, required=False, type='int'),
+ )
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              required_one_of=[['key_file', 'key_data']],
+                              mutually_exclusive=[['key_file', 'key_data']])
+ return module
+
+
+def ec2_win_password(module):
+ instance_id = module.params.get('instance_id')
+ key_file = module.params.get('key_file')
+ if module.params.get('key_passphrase') is None:
+ b_key_passphrase = None
+ else:
+ b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
+ if module.params.get('key_data') is None:
+ b_key_data = None
+ else:
+ b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ ec2 = ec2_connect(module)
+
+ if wait:
+ start = datetime.datetime.now()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+
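+        # Poll every 5 seconds until EC2 returns non-empty password data or the timeout window closes.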
+ while datetime.datetime.now() < end:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+ if not decoded:
+ time.sleep(5)
+ else:
+ break
+ else:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+
+ if wait and datetime.datetime.now() >= end:
+ module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
+
+ if key_file is not None and b_key_data is None:
+ try:
+ with open(key_file, 'rb') as f:
+ key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
+ except IOError as e:
+ # Handle bad files
+ module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+ except (ValueError, TypeError) as e:
+ # Handle issues loading key
+ module.fail_json(msg="unable to parse key file")
+ elif b_key_data is not None and key_file is None:
+ try:
+ key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend())
+ except (ValueError, TypeError) as e:
+ module.fail_json(msg="unable to parse key data")
+
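+    # The password data is encrypted by EC2 with the instance's key pair; decrypt it using PKCS#1 v1.5 padding.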
+ try:
+ decrypted = key.decrypt(decoded, PKCS1v15())
+ except ValueError as e:
+ decrypted = None
+
+ if decrypted is None:
+ module.fail_json(msg="unable to decrypt password", win_password='', changed=False)
+ else:
+ if wait:
+ elapsed = datetime.datetime.now() - start
+ module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds)
+ else:
+ module.exit_json(win_password=decrypted, changed=False)
+
+
+def main():
+ module = setup_module_object()
+
+ if not HAS_BOTO:
+ module.fail_json(msg='Boto required for this module.')
+
+ if not HAS_CRYPTOGRAPHY:
+ module.fail_json(msg='cryptography package required for this module.')
+
+ ec2_win_password(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_attribute.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
new file mode 100644
index 00000000..552747ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_attribute
+version_added: 1.0.0
+short_description: Manage ECS container instance attributes
+description:
+ - Create, update or delete ECS container instance attributes.
+author: Andrej Svenke (@anryko)
+requirements: [ botocore, boto3 ]
+options:
+ cluster:
+ description:
+      - The short name or full Amazon Resource Name (ARN) of the cluster
+        that contains the resource to apply attributes to.
+ required: true
+ type: str
+ state:
+ description:
+ - The desired state of the attributes.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ attributes:
+ description:
+ - List of attributes.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the attribute. Up to 128 letters (uppercase and lowercase),
+ numbers, hyphens, underscores, and periods are allowed.
+ required: true
+ type: str
+ value:
+ description:
+ - The value of the attribute. Up to 128 letters (uppercase and lowercase),
+ numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons,
+ and spaces are allowed.
+ required: false
+ type: str
+ ec2_instance_id:
+ description:
+ - EC2 instance ID of ECS cluster container instance.
+ required: true
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Set attributes
+ community.aws.ecs_attribute:
+ state: present
+ cluster: test-cluster
+ ec2_instance_id: "{{ ec2_id }}"
+ attributes:
+ - flavor: test
+ - migrated
+ delegate_to: localhost
+
+- name: Delete attributes
+ community.aws.ecs_attribute:
+ state: absent
+ cluster: test-cluster
+ ec2_instance_id: "{{ ec2_id }}"
+ attributes:
+ - flavor: test
+ - migrated
+ delegate_to: localhost
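+
+# A minimal sketch of the explicit name/value attribute form the module also accepts.
+- name: Set attributes (explicit name/value form)
+  community.aws.ecs_attribute:
+    state: present
+    cluster: test-cluster
+    ec2_instance_id: "{{ ec2_id }}"
+    attributes:
+      - name: flavor
+        value: test
+  delegate_to: localhost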
+'''
+
+RETURN = r'''
+attributes:
+ description: attributes
+ type: complex
+ returned: always
+ contains:
+ cluster:
+ description: cluster name
+ type: str
+ ec2_instance_id:
+ description: ec2 instance id of ecs container instance
+ type: str
+ attributes:
+ description: list of attributes
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: name of the attribute
+ type: str
+ value:
+ description: value of the attribute
+ returned: if present
+ type: str
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, EndpointConnectionError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class EcsAttributes(object):
+ """Handles ECS Cluster Attribute"""
+
+ def __init__(self, module, attributes):
+ self.module = module
+ self.attributes = attributes if self._validate_attrs(attributes) else self._parse_attrs(attributes)
+
+ def __bool__(self):
+ return bool(self.attributes)
+
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self.attributes)
+
+ @staticmethod
+ def _validate_attrs(attrs):
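+        # True only when every attribute is already an explicit {'name': ..., 'value': ...} dict.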
+ return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs)
+
+ def _parse_attrs(self, attrs):
+ attrs_parsed = []
+ for attr in attrs:
+ if isinstance(attr, dict):
+ if len(attr) != 1:
+ self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr))
+ name, value = list(attr.items())[0]
+ attrs_parsed.append({'name': name, 'value': value})
+ elif isinstance(attr, str):
+ attrs_parsed.append({'name': attr, 'value': None})
+ else:
+ self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs))
+
+ return attrs_parsed
+
+ def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False):
+ attr_obj = {'targetType': 'container-instance',
+ 'targetId': ecs_arn,
+ 'name': name}
+ if not skip_value and value is not None:
+ attr_obj['value'] = value
+
+ return attr_obj
+
+ def get_for_ecs_arn(self, ecs_arn, skip_value=False):
+ """
+ Returns list of attribute dicts ready to be passed to boto3
+ attributes put/delete methods.
+ """
+ return [self._setup_attr_obj(ecs_arn, skip_value=skip_value, **attr) for attr in self.attributes]
+
+ def diff(self, attrs):
+ """
+ Returns EcsAttributes Object containing attributes which are present
+ in self but are absent in passed attrs (EcsAttributes Object).
+ """
+ attrs_diff = [attr for attr in self.attributes if attr not in attrs]
+ return EcsAttributes(self.module, attrs_diff)
+
+
+class Ec2EcsInstance(object):
+ """Handle ECS Cluster Remote Operations"""
+
+ def __init__(self, module, cluster, ec2_id):
+ self.module = module
+ self.cluster = cluster
+ self.ec2_id = ec2_id
+
+ try:
+ self.ecs = module.client('ecs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ self.ecs_arn = self._get_ecs_arn()
+
+ def _get_ecs_arn(self):
+ try:
+ ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns']
+ ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster,
+ containerInstances=ecs_instances_arns)['containerInstances']
+ except (ClientError, EndpointConnectionError) as e:
+ self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+
+ try:
+ ecs_arn = next(inst for inst in ec2_instances
+ if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn']
+ except StopIteration:
+ self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster))
+
+ return ecs_arn
+
+ def attrs_put(self, attrs):
+ """Puts attributes on ECS container instance"""
+ try:
+ self.ecs.put_attributes(cluster=self.cluster,
+ attributes=attrs.get_for_ecs_arn(self.ecs_arn))
+ except ClientError as e:
+ self.module.fail_json(msg=str(e))
+
+ def attrs_delete(self, attrs):
+ """Deletes attributes from ECS container instance."""
+ try:
+ self.ecs.delete_attributes(cluster=self.cluster,
+ attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True))
+ except ClientError as e:
+ self.module.fail_json(msg=str(e))
+
+ def attrs_get_by_name(self, attrs):
+ """
+ Returns EcsAttributes object containing attributes from ECS container instance with names
+ matching to attrs.attributes (EcsAttributes Object).
+ """
+ attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']}
+ for attr in attrs]
+
+ try:
+ matched_ecs_targets = [attr_found for attr_obj in attr_objs
+ for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']]
+ except ClientError as e:
+ self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+
+ matched_objs = [target for target in matched_ecs_targets
+ if target['targetId'] == self.ecs_arn]
+
+ results = [{'name': match['name'], 'value': match.get('value', None)}
+ for match in matched_objs]
+
+ return EcsAttributes(self.module, results)
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ cluster=dict(required=True, type='str'),
+ ec2_instance_id=dict(required=True, type='str'),
+ attributes=dict(required=True, type='list', elements='dict'),
+ )
+
+ required_together = [['cluster', 'ec2_instance_id', 'attributes']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=required_together,
+ )
+
+ cluster = module.params['cluster']
+ ec2_instance_id = module.params['ec2_instance_id']
+ attributes = module.params['attributes']
+
+ conti = Ec2EcsInstance(module, cluster, ec2_instance_id)
+ attrs = EcsAttributes(module, attributes)
+
+ results = {'changed': False,
+ 'attributes': [
+ {'cluster': cluster,
+ 'ec2_instance_id': ec2_instance_id,
+ 'attributes': attributes}
+ ]}
+
+ attrs_present = conti.attrs_get_by_name(attrs)
+
+ if module.params['state'] == 'present':
+ attrs_diff = attrs.diff(attrs_present)
+ if not attrs_diff:
+ module.exit_json(**results)
+
+ conti.attrs_put(attrs_diff)
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if not attrs_present:
+ module.exit_json(**results)
+
+ conti.attrs_delete(attrs_present)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_cluster.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
new file mode 100644
index 00000000..ed0dc1c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_cluster
+version_added: 1.0.0
+short_description: Create or terminate ECS clusters.
+notes:
+ - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
+  - When I(state=has_instances), the module will wait for the cluster to have at least one instance registered to it.
+description:
+ - Creates or terminates ecs clusters.
+author: Mark Chance (@Java1Guy)
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the cluster.
+ required: true
+ choices: ['present', 'absent', 'has_instances']
+ type: str
+ name:
+ description:
+ - The cluster name.
+ required: true
+ type: str
+ delay:
+ description:
+ - Number of seconds to wait.
+ required: false
+ type: int
+ default: 10
+ repeat:
+ description:
+ - The number of times to wait for the cluster to have an instance.
+ required: false
+ type: int
+ default: 10
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Cluster creation
+ community.aws.ecs_cluster:
+ name: default
+ state: present
+
+- name: Cluster deletion
+ community.aws.ecs_cluster:
+ name: default
+ state: absent
+
+- name: Wait for register
+ community.aws.ecs_cluster:
+ name: "{{ new_cluster }}"
+ state: has_instances
+ delay: 10
+ repeat: 10
+ register: task_output
+
+'''
+RETURN = '''
+activeServicesCount:
+ description: how many services are active in this cluster
+  returned: always (0 if a new cluster)
+ type: int
+clusterArn:
+ description: the ARN of the cluster just created
+ type: str
+  returned: always
+ sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
+clusterName:
+ description: name of the cluster just created (should match the input argument)
+ type: str
+ returned: always
+ sample: test-cluster-mfshcdok
+pendingTasksCount:
+ description: how many tasks are waiting to run in this cluster
+  returned: always (0 if a new cluster)
+ type: int
+registeredContainerInstancesCount:
+ description: how many container instances are available in this cluster
+  returned: always (0 if a new cluster)
+ type: int
+runningTasksCount:
+ description: how many tasks are running in this cluster
+  returned: always (0 if a new cluster)
+ type: int
+status:
+ description: the status of the new cluster
+ returned: always
+ type: str
+ sample: ACTIVE
+'''
+import time
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class EcsClusterManager:
+ """Handles ECS Clusters"""
+
+ def __init__(self, module):
+ self.module = module
+ try:
+ self.ecs = module.client('ecs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
+ for c in array_of_clusters:
+ if c[field_name].endswith(cluster_name):
+ return c
+ return None
+
+ def describe_cluster(self, cluster_name):
+ response = self.ecs.describe_clusters(clusters=[
+ cluster_name
+ ])
+ if len(response['failures']) > 0:
+ c = self.find_in_array(response['failures'], cluster_name, 'arn')
+ if c and c['reason'] == 'MISSING':
+ return None
+ # fall thru and look through found ones
+ if len(response['clusters']) > 0:
+ c = self.find_in_array(response['clusters'], cluster_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing cluster %s." % cluster_name)
+
+ def create_cluster(self, clusterName='default'):
+ response = self.ecs.create_cluster(clusterName=clusterName)
+ return response['cluster']
+
+ def delete_cluster(self, clusterName):
+ return self.ecs.delete_cluster(cluster=clusterName)
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'has_instances']),
+ name=dict(required=True, type='str'),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10)
+ )
+ required_together = [['state', 'name']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=required_together,
+ )
+
+ cluster_mgr = EcsClusterManager(module)
+ try:
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ except Exception as e:
+ module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
+ results['cluster'] = existing
+ else:
+ if not module.check_mode:
+ # doesn't exist. create it.
+ results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
+ results['changed'] = True
+
+ # delete the cluster
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ results['cluster'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ cluster_mgr.delete_cluster(module.params['name'])
+ results['changed'] = True
+ elif module.params['state'] == 'has_instances':
+ if not existing:
+ module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
+ return
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ delay = module.params['delay']
+ repeat = module.params['repeat']
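+        # Poll up to 'repeat' times, sleeping 'delay' seconds between checks, until an instance registers.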
+ time.sleep(delay)
+ count = 0
+ for i in range(repeat):
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ count = existing['registeredContainerInstancesCount']
+ if count > 0:
+ results['changed'] = True
+ break
+ time.sleep(delay)
+        if count == 0 and i == repeat - 1:
+ module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+ return
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_ecr.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
new file mode 100644
index 00000000..4ae7d40c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_ecr
+version_added: 1.0.0
+short_description: Manage Elastic Container Registry repositories
+description:
+ - Manage Elastic Container Registry repositories.
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name of the repository.
+ required: true
+ type: str
+ registry_id:
+ description:
+ - AWS account id associated with the registry.
+ - If not specified, the default registry is assumed.
+ required: false
+ type: str
+ policy:
+ description:
+ - JSON or dict that represents the new policy.
+ required: false
+ type: json
+ force_set_policy:
+ description:
+      - If I(force_set_policy=false), it prevents setting a policy that would lock you out of
+        setting another policy in the future.
+ required: false
+ default: false
+ type: bool
+ purge_policy:
+ description:
+      - If C(true), remove the policy from the repository.
+ - Alias C(delete_policy) has been deprecated and will be removed after 2022-06-01.
+ - Defaults to C(false).
+ required: false
+ type: bool
+ aliases: [ delete_policy ]
+ image_tag_mutability:
+ description:
+      - Configure whether the repository should be mutable (i.e. an existing tag can be overwritten) or not.
+ required: false
+ choices: [mutable, immutable]
+ default: 'mutable'
+ type: str
+ lifecycle_policy:
+ description:
+ - JSON or dict that represents the new lifecycle policy.
+ required: false
+ type: json
+ purge_lifecycle_policy:
+ description:
+      - If C(true), remove the lifecycle policy from the repository.
+ - Defaults to C(false).
+ required: false
+ type: bool
+ state:
+ description:
+ - Create or destroy the repository.
+ required: false
+ choices: [present, absent]
+ default: 'present'
+ type: str
+ scan_on_push:
+ description:
+      - If C(true), images are scanned for known vulnerabilities after being pushed to the repository.
+ - I(scan_on_push) requires botocore >= 1.13.3
+ required: false
+ default: false
+ type: bool
+ version_added: 1.3.0
+author:
+ - David M. Lee (@leedm777)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# If the repository does not exist, it is created. If it does exist, this does
+# not affect any policies already on it.
+- name: ecr-repo
+ community.aws.ecs_ecr:
+ name: super/cool
+
+- name: destroy-ecr-repo
+ community.aws.ecs_ecr:
+ name: old/busted
+ state: absent
+
+- name: Cross account ecr-repo
+ community.aws.ecs_ecr:
+ registry_id: 999999999999
+ name: cross/account
+
+- name: set-policy as object
+ community.aws.ecs_ecr:
+ name: needs-policy-object
+ policy:
+ Version: '2008-10-17'
+ Statement:
+ - Sid: read-only
+ Effect: Allow
+ Principal:
+ AWS: '{{ read_only_arn }}'
+ Action:
+ - ecr:GetDownloadUrlForLayer
+ - ecr:BatchGetImage
+ - ecr:BatchCheckLayerAvailability
+
+- name: set-policy as string
+ community.aws.ecs_ecr:
+ name: needs-policy-string
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+
+- name: delete-policy
+ community.aws.ecs_ecr:
+ name: needs-no-policy
+ purge_policy: yes
+
+- name: create immutable ecr-repo
+ community.aws.ecs_ecr:
+ name: super/cool
+ image_tag_mutability: immutable
+
+- name: set-lifecycle-policy
+ community.aws.ecs_ecr:
+ name: needs-lifecycle-policy
+ scan_on_push: yes
+ lifecycle_policy:
+ rules:
+ - rulePriority: 1
+ description: new policy
+ selection:
+ tagStatus: untagged
+ countType: sinceImagePushed
+ countUnit: days
+ countNumber: 365
+ action:
+ type: expire
+
+- name: purge-lifecycle-policy
+ community.aws.ecs_ecr:
+ name: needs-no-lifecycle-policy
+ purge_lifecycle_policy: true
+'''
+
+RETURN = '''
+state:
+ type: str
+ description: The asserted state of the repository (present, absent)
+ returned: always
+created:
+ type: bool
+ description: If true, the repository was created
+ returned: always
+name:
+ type: str
+ description: The name of the repository
+ returned: "when state == 'absent'"
+repository:
+ type: dict
+ description: The created or updated repository
+ returned: "when state == 'present'"
+ sample:
+ createdAt: '2017-01-17T08:41:32-06:00'
+ registryId: '999999999999'
+ repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090
+ repositoryName: ecr-test-1484664090
+ repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
+'''
+
+import json
+import traceback
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict
+from ansible.module_utils.six import string_types
+
+
+def build_kwargs(registry_id):
+ """
+ Builds a kwargs dict which may contain the optional registryId.
+
+ :param registry_id: Optional string containing the registryId.
+ :return: kwargs dict with registryId, if given
+ """
+ if not registry_id:
+ return dict()
+ else:
+ return dict(registryId=registry_id)
+
+
+class EcsEcr:
+ def __init__(self, module):
+ self.ecr = module.client('ecr')
+ self.sts = module.client('sts')
+ self.check_mode = module.check_mode
+ self.changed = False
+ self.skipped = False
+
+ def get_repository(self, registry_id, name):
+ try:
+ res = self.ecr.describe_repositories(
+ repositoryNames=[name], **build_kwargs(registry_id))
+ repos = res.get('repositories')
+ return repos and repos[0]
+ except ClientError as err:
+ code = err.response['Error'].get('Code', 'Unknown')
+ if code == 'RepositoryNotFoundException':
+ return None
+ raise
+
+ def get_repository_policy(self, registry_id, name):
+ try:
+ res = self.ecr.get_repository_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ text = res.get('policyText')
+ return text and json.loads(text)
+ except ClientError as err:
+ code = err.response['Error'].get('Code', 'Unknown')
+ if code == 'RepositoryPolicyNotFoundException':
+ return None
+ raise
+
+ def create_repository(self, registry_id, name, image_tag_mutability):
+ if registry_id:
+ default_registry_id = self.sts.get_caller_identity().get('Account')
+ if registry_id != default_registry_id:
+                raise Exception('Cannot create repository in registry {0}. '
+                                'Would be created in {1} instead.'.format(registry_id, default_registry_id))
+
+ if not self.check_mode:
+ repo = self.ecr.create_repository(
+ repositoryName=name,
+ imageTagMutability=image_tag_mutability).get('repository')
+ self.changed = True
+ return repo
+ else:
+ self.skipped = True
+ return dict(repositoryName=name)
+
+ def set_repository_policy(self, registry_id, name, policy_text, force):
+ if not self.check_mode:
+ policy = self.ecr.set_repository_policy(
+ repositoryName=name,
+ policyText=policy_text,
+ force=force,
+ **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ self.skipped = True
+ if self.get_repository(registry_id, name) is None:
+ printable = name
+ if registry_id:
+ printable = '{0}:{1}'.format(registry_id, name)
+ raise Exception(
+ 'could not find repository {0}'.format(printable))
+ return
+
+ def delete_repository(self, registry_id, name):
+ if not self.check_mode:
+ repo = self.ecr.delete_repository(
+ repositoryName=name, **build_kwargs(registry_id))
+ self.changed = True
+ return repo
+ else:
+ repo = self.get_repository(registry_id, name)
+ if repo:
+ self.skipped = True
+ return repo
+ return None
+
+ def delete_repository_policy(self, registry_id, name):
+ if not self.check_mode:
+ policy = self.ecr.delete_repository_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ policy = self.get_repository_policy(registry_id, name)
+ if policy:
+ self.skipped = True
+ return policy
+ return None
+
+ def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration):
+ repo = self.get_repository(registry_id, name)
+ current_mutability_configuration = repo.get('imageTagMutability')
+
+ if current_mutability_configuration != new_mutability_configuration:
+ if not self.check_mode:
+ self.ecr.put_image_tag_mutability(
+ repositoryName=name,
+ imageTagMutability=new_mutability_configuration,
+ **build_kwargs(registry_id))
+ else:
+ self.skipped = True
+ self.changed = True
+
+ repo['imageTagMutability'] = new_mutability_configuration
+ return repo
+
+ def get_lifecycle_policy(self, registry_id, name):
+ try:
+ res = self.ecr.get_lifecycle_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ text = res.get('lifecyclePolicyText')
+ return text and json.loads(text)
+ except ClientError as err:
+ code = err.response['Error'].get('Code', 'Unknown')
+ if code == 'LifecyclePolicyNotFoundException':
+ return None
+ raise
+
+ def put_lifecycle_policy(self, registry_id, name, policy_text):
+ if not self.check_mode:
+ policy = self.ecr.put_lifecycle_policy(
+ repositoryName=name,
+ lifecyclePolicyText=policy_text,
+ **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ self.skipped = True
+ if self.get_repository(registry_id, name) is None:
+ printable = name
+ if registry_id:
+ printable = '{0}:{1}'.format(registry_id, name)
+ raise Exception(
+ 'could not find repository {0}'.format(printable))
+ return
+
+ def purge_lifecycle_policy(self, registry_id, name):
+ if not self.check_mode:
+ policy = self.ecr.delete_lifecycle_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ policy = self.get_lifecycle_policy(registry_id, name)
+ if policy:
+ self.skipped = True
+ return policy
+ return None
+
+ def put_image_scanning_configuration(self, registry_id, name, scan_on_push):
+ if not self.check_mode:
+ if registry_id:
+ scan = self.ecr.put_image_scanning_configuration(
+ registryId=registry_id,
+ repositoryName=name,
+ imageScanningConfiguration={'scanOnPush': scan_on_push}
+ )
+ else:
+ scan = self.ecr.put_image_scanning_configuration(
+ repositoryName=name,
+ imageScanningConfiguration={'scanOnPush': scan_on_push}
+ )
+ self.changed = True
+ return scan
+ else:
+ self.skipped = True
+ return None
+
+
+def sort_lists_of_strings(policy):
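+    # Sort any all-string lists in each statement so ordering differences alone don't register as policy changes.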
+ for statement_index in range(0, len(policy.get('Statement', []))):
+ for key in policy['Statement'][statement_index]:
+ value = policy['Statement'][statement_index][key]
+ if isinstance(value, list) and all(isinstance(item, string_types) for item in value):
+ policy['Statement'][statement_index][key] = sorted(value)
+ return policy
+
+
+def run(ecr, params):
+    # type: (EcsEcr, dict) -> Tuple[bool, dict]
+ result = {}
+ try:
+ name = params['name']
+ state = params['state']
+ policy_text = params['policy']
+ purge_policy = params['purge_policy']
+ registry_id = params['registry_id']
+ force_set_policy = params['force_set_policy']
+ image_tag_mutability = params['image_tag_mutability'].upper()
+ lifecycle_policy_text = params['lifecycle_policy']
+ purge_lifecycle_policy = params['purge_lifecycle_policy']
+ scan_on_push = params['scan_on_push']
+
+ # Parse policies, if they are given
+ try:
+ policy = policy_text and json.loads(policy_text)
+ except ValueError:
+ result['policy'] = policy_text
+ result['msg'] = 'Could not parse policy'
+ return False, result
+
+ try:
+ lifecycle_policy = \
+ lifecycle_policy_text and json.loads(lifecycle_policy_text)
+ except ValueError:
+ result['lifecycle_policy'] = lifecycle_policy_text
+ result['msg'] = 'Could not parse lifecycle_policy'
+ return False, result
+
+ result['state'] = state
+ result['created'] = False
+
+ repo = ecr.get_repository(registry_id, name)
+
+ if state == 'present':
+ result['created'] = False
+
+ if not repo:
+ repo = ecr.create_repository(registry_id, name, image_tag_mutability)
+ result['changed'] = True
+ result['created'] = True
+ else:
+ repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability)
+ result['repository'] = repo
+
+ if purge_lifecycle_policy:
+ original_lifecycle_policy = \
+ ecr.get_lifecycle_policy(registry_id, name)
+
+ result['lifecycle_policy'] = None
+
+ if original_lifecycle_policy:
+ ecr.purge_lifecycle_policy(registry_id, name)
+ result['changed'] = True
+
+ elif lifecycle_policy_text is not None:
+ try:
+ lifecycle_policy = sort_json_policy_dict(lifecycle_policy)
+ result['lifecycle_policy'] = lifecycle_policy
+
+ original_lifecycle_policy = ecr.get_lifecycle_policy(
+ registry_id, name)
+
+ if original_lifecycle_policy:
+ original_lifecycle_policy = sort_json_policy_dict(
+ original_lifecycle_policy)
+
+ if original_lifecycle_policy != lifecycle_policy:
+ ecr.put_lifecycle_policy(registry_id, name,
+ lifecycle_policy_text)
+ result['changed'] = True
+ except Exception:
+ # Some failure w/ the policy. It's helpful to know what the
+ # policy is.
+ result['lifecycle_policy'] = lifecycle_policy_text
+ raise
+
+ if purge_policy:
+ original_policy = ecr.get_repository_policy(registry_id, name)
+
+ result['policy'] = None
+
+ if original_policy:
+ ecr.delete_repository_policy(registry_id, name)
+ result['changed'] = True
+
+ elif policy_text is not None:
+ try:
+ # Sort any lists containing only string types
+ policy = sort_lists_of_strings(policy)
+
+ result['policy'] = policy
+
+ original_policy = ecr.get_repository_policy(
+ registry_id, name)
+ if original_policy:
+ original_policy = sort_lists_of_strings(original_policy)
+
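+                    # compare_policies returns True when the two policies differ meaningfully.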
+ if compare_policies(original_policy, policy):
+ ecr.set_repository_policy(
+ registry_id, name, policy_text, force_set_policy)
+ result['changed'] = True
+ except Exception:
+ # Some failure w/ the policy. It's helpful to know what the
+ # policy is.
+ result['policy'] = policy_text
+ raise
+
+ original_scan_on_push = ecr.get_repository(registry_id, name)
+ if original_scan_on_push is not None:
+ if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']:
+ result['changed'] = True
+ result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push
+ response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push)
+
+ elif state == 'absent':
+ result['name'] = name
+ if repo:
+ ecr.delete_repository(registry_id, name)
+ result['changed'] = True
+
+ except Exception as err:
+ msg = str(err)
+ if isinstance(err, ClientError):
+ msg = boto_exception(err)
+ result['msg'] = msg
+ result['exception'] = traceback.format_exc()
+ return False, result
+
+ if ecr.skipped:
+ result['skipped'] = True
+
+ if ecr.changed:
+ result['changed'] = True
+
+ return True, result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ registry_id=dict(required=False),
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ force_set_policy=dict(required=False, type='bool', default=False),
+ policy=dict(required=False, type='json'),
+ image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'],
+ default='mutable'),
+ purge_policy=dict(required=False, type='bool', aliases=['delete_policy'],
+ deprecated_aliases=[dict(name='delete_policy', date='2022-06-01', collection_name='community.aws')]),
+ lifecycle_policy=dict(required=False, type='json'),
+ purge_lifecycle_policy=dict(required=False, type='bool'),
+        scan_on_push=dict(required=False, type='bool', default=False)
+ )
+ mutually_exclusive = [
+ ['policy', 'purge_policy'],
+ ['lifecycle_policy', 'purge_lifecycle_policy']]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+
+ ecr = EcsEcr(module)
+ passed, result = run(ecr, module.params)
+
+ if passed:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service.py
new file mode 100644
index 00000000..7bc3d467
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service.py
@@ -0,0 +1,862 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_service
+version_added: 1.0.0
+short_description: Create, terminate, start or stop a service in ECS
+description:
+  - Creates or terminates ECS services.
+notes:
+  - The service role specified must be assumable, i.e. it must have a trust relationship for the ECS service (ecs.amazonaws.com).
+ - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html).
+ - An IAM role must have been previously created.
+author:
+ - "Mark Chance (@Java1Guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+ - "Stephane Maarek (@simplesteph)"
+ - "Zac Blazic (@zacblazic)"
+
+requirements: [ json, botocore, boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the service.
+ required: true
+ choices: ["present", "absent", "deleting"]
+ type: str
+ name:
+ description:
+ - The name of the service.
+ required: true
+ type: str
+ cluster:
+ description:
+ - The name of the cluster in which the service exists.
+ required: false
+ type: str
+ task_definition:
+ description:
+ - The task definition the service will run.
+ - This parameter is required when I(state=present).
+ required: false
+ type: str
+ load_balancers:
+ description:
+ - The list of ELBs defined for this service.
+ required: false
+ type: list
+ elements: dict
+ desired_count:
+ description:
+      - The count of how many instances of the service should be running.
+ - This parameter is required when I(state=present).
+ required: false
+ type: int
+ client_token:
+ description:
+ - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
+ required: false
+ type: str
+ role:
+ description:
+ - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
+ on your behalf.
+ - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc).
+ required: false
+ type: str
+ delay:
+ description:
+ - The time to wait before checking that the service is available.
+ required: false
+ default: 10
+ type: int
+ repeat:
+ description:
+ - The number of times to check that the service is available.
+ required: false
+ default: 10
+ type: int
+ force_new_deployment:
+ description:
+ - Force deployment of service even if there are no changes.
+ required: false
+ type: bool
+ default: false
+ deployment_configuration:
+ description:
+ - Optional parameters that control the deployment_configuration.
+ - Format is '{"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}
+ required: false
+ type: dict
+ suboptions:
+ maximum_percent:
+ type: int
+ description: Upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment.
+ minimum_healthy_percent:
+ type: int
+ description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment.
+ placement_constraints:
+ description:
+ - The placement constraints for the tasks in the service.
+ - See U(https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementConstraint.html) for more details.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ description: The type of constraint.
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ type: str
+ placement_strategy:
+ description:
+ - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ description: The type of placement strategy.
+ type: str
+ field:
+ description: The field to apply the placement strategy against.
+ type: str
+ network_configuration:
+ description:
+ - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
+ - I(assign_public_ip) requires botocore >= 1.8.4
+ type: dict
+ suboptions:
+ subnets:
+ description:
+ - A list of subnet IDs to associate with the task.
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - A list of security group names or group IDs to associate with the task.
+ type: list
+ elements: str
+ assign_public_ip:
+ description:
+ - Whether the task's elastic network interface receives a public IP address.
+ - This option requires botocore >= 1.8.4.
+ type: bool
+ launch_type:
+ description:
+ - The launch type on which to run your service.
+ required: false
+ choices: ["EC2", "FARGATE"]
+ type: str
+ health_check_grace_period_seconds:
+ description:
+ - Seconds to wait before health checking the freshly added/updated services.
+ - This option requires botocore >= 1.8.20.
+ required: false
+ type: int
+ service_registries:
+ description:
+ - Describes service discovery registries this service will register with.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ container_name:
+ description:
+ - Container name for service discovery registration.
+ type: str
+ container_port:
+ description:
+ - Container port for service discovery registration.
+ type: int
+ arn:
+ description:
+ - Service discovery registry ARN.
+ type: str
+ scheduling_strategy:
+ description:
+ - The scheduling strategy.
+ - Defaults to C(REPLICA) if not given to preserve previous behavior.
+ required: false
+ choices: ["DAEMON", "REPLICA"]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic provisioning example
+- community.aws.ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 0
+
+- name: create ECS service on VPC network
+ community.aws.ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 0
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+
+# Simple example to delete
+- community.aws.ecs_service:
+ name: default
+ state: absent
+ cluster: new_cluster
+
+# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4)
+- community.aws.ecs_service:
+ state: present
+ name: test-service
+ cluster: test-cluster
+ task_definition: test-task-definition
+ desired_count: 3
+ deployment_configuration:
+ minimum_healthy_percent: 75
+ maximum_percent: 150
+ placement_constraints:
+ - type: memberOf
+ expression: 'attribute:flavor==test'
+ placement_strategy:
+ - type: binpack
+ field: memory
+'''
+
+RETURN = r'''
+service:
+ description: Details of created service.
+ returned: when creating a service
+ type: complex
+ contains:
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+          description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+ of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
+        arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ deploymentConfiguration:
+ description: dictionary of deploymentConfiguration
+ returned: always
+ type: complex
+ contains:
+ maximumPercent:
+ description: maximumPercent param
+ returned: always
+ type: int
+ minimumHealthyPercent:
+ description: minimumHealthyPercent param
+ returned: always
+ type: int
+ events:
+ description: list of service events
+ returned: always
+ type: list
+ elements: dict
+ placementConstraints:
+ description: List of placement constraints objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of constraint. Valid values are distinctInstance and memberOf.
+ returned: always
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is
+ distinctInstance.
+ returned: always
+ type: str
+ placementStrategy:
+ description: List of placement strategy objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of placement strategy. Valid values are random, spread and binpack.
+ returned: always
+ type: str
+ field:
+ description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
+ (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
+ such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
+ returned: always
+ type: str
+
+ansible_facts:
+ description: Facts about deleted service.
+ returned: when deleting a service
+ type: complex
+ contains:
+ service:
+ description: Details of deleted service.
+ returned: when service existed and was deleted
+ type: complex
+ contains:
+ clusterArn:
+          description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+              description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+ of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
+            arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ deploymentConfiguration:
+ description: dictionary of deploymentConfiguration
+ returned: always
+ type: complex
+ contains:
+ maximumPercent:
+ description: maximumPercent param
+ returned: always
+ type: int
+ minimumHealthyPercent:
+ description: minimumHealthyPercent param
+ returned: always
+ type: int
+ events:
+ description: list of service events
+ returned: always
+ type: list
+ elements: dict
+ placementConstraints:
+ description: List of placement constraints objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of constraint. Valid values are distinctInstance and memberOf.
+ returned: always
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if
+ the constraint type is distinctInstance.
+ returned: always
+ type: str
+ placementStrategy:
+ description: List of placement strategy objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of placement strategy. Valid values are random, spread and binpack.
+ returned: always
+ type: str
+ field:
+ description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
+ (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
+ such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
+ returned: always
+ type: str
+'''
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names
+
+DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+    'maximum_percent': 'int',
+    'minimum_healthy_percent': 'int',
+}
+
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+ self.ec2 = module.client('ec2')
+
+ def format_network_configuration(self, network_config):
+ result = dict()
+ if network_config['subnets'] is not None:
+ result['subnets'] = network_config['subnets']
+ else:
+ self.module.fail_json(msg="Network configuration must include subnets")
+ if network_config['security_groups'] is not None:
+ groups = network_config['security_groups']
+ if any(not sg.startswith('sg-') for sg in groups):
+ try:
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't look up security groups")
+ result['securityGroups'] = groups
+ if network_config['assign_public_ip'] is not None:
+ if self.module.botocore_at_least('1.8.4'):
+ if network_config['assign_public_ip'] is True:
+ result['assignPublicIp'] = "ENABLED"
+ else:
+ result['assignPublicIp'] = "DISABLED"
+ else:
+ self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration')
+ return dict(awsvpcConfiguration=result)
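+
+    # Illustrative mapping for the method above (hypothetical subnet/security-group IDs):
+    #   format_network_configuration({'subnets': ['subnet-abcd1234'],
+    #                                 'security_groups': ['sg-aaaa1111'],
+    #                                 'assign_public_ip': True})
+    #   -> {'awsvpcConfiguration': {'subnets': ['subnet-abcd1234'],
+    #                               'securityGroups': ['sg-aaaa1111'],
+    #                               'assignPublicIp': 'ENABLED'}}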
+
+ def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
+ for c in array_of_services:
+ if c[field_name].endswith(service_name):
+ return c
+ return None
+
+ def describe_service(self, cluster_name, service_name):
+ response = self.ecs.describe_services(
+ cluster=cluster_name,
+ services=[service_name])
+        msg = ''
+        if len(response['failures']) > 0:
+            c = self.find_in_array(response['failures'], service_name, 'arn')
+            if c and c['reason'] == 'MISSING':
+                return None
+            if c:
+                msg += ", failure reason is " + c['reason']
+            # fall through and look through found ones
+ if len(response['services']) > 0:
+ c = self.find_in_array(response['services'], service_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing service %s." % service_name)
+
+ def is_matching_service(self, expected, existing):
+ if expected['task_definition'] != existing['taskDefinition']:
+ return False
+
+ if (expected['load_balancers'] or []) != existing['loadBalancers']:
+ return False
+
+        # expected is the module params. The DAEMON scheduling strategy returns a desired
+        # count equal to the number of instances running; don't check desired count if the
+        # scheduling strategy is DAEMON.
+ if (expected['scheduling_strategy'] != 'DAEMON'):
+ if (expected['desired_count'] or 0) != existing['desiredCount']:
+ return False
+
+ return True
+
+ def create_service(self, service_name, cluster_name, task_definition, load_balancers,
+ desired_count, client_token, role, deployment_configuration,
+ placement_constraints, placement_strategy, health_check_grace_period_seconds,
+ network_configuration, service_registries, launch_type, scheduling_strategy):
+
+ params = dict(
+ cluster=cluster_name,
+ serviceName=service_name,
+ taskDefinition=task_definition,
+ loadBalancers=load_balancers,
+ clientToken=client_token,
+ role=role,
+ deploymentConfiguration=deployment_configuration,
+ placementConstraints=placement_constraints,
+ placementStrategy=placement_strategy
+ )
+ if network_configuration:
+ params['networkConfiguration'] = network_configuration
+ if launch_type:
+ params['launchType'] = launch_type
+ if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
+ params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ if service_registries:
+ params['serviceRegistries'] = service_registries
+ # desired count is not required if scheduling strategy is daemon
+ if desired_count is not None:
+ params['desiredCount'] = desired_count
+
+ if scheduling_strategy:
+ params['schedulingStrategy'] = scheduling_strategy
+ response = self.ecs.create_service(**params)
+ return self.jsonize(response['service'])
+
+ def update_service(self, service_name, cluster_name, task_definition,
+ desired_count, deployment_configuration, network_configuration,
+ health_check_grace_period_seconds, force_new_deployment):
+ params = dict(
+ cluster=cluster_name,
+ service=service_name,
+ taskDefinition=task_definition,
+ deploymentConfiguration=deployment_configuration)
+ if network_configuration:
+ params['networkConfiguration'] = network_configuration
+ if force_new_deployment:
+ params['forceNewDeployment'] = force_new_deployment
+ if health_check_grace_period_seconds is not None:
+ params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ # desired count is not required if scheduling strategy is daemon
+ if desired_count is not None:
+ params['desiredCount'] = desired_count
+
+ response = self.ecs.update_service(**params)
+ return self.jsonize(response['service'])
+
+ def jsonize(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'createdAt' in service:
+ service['createdAt'] = str(service['createdAt'])
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
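+
+    # Illustrative example of the conversion above (assumed value): a
+    # datetime.datetime(2021, 1, 1, 12, 0) stored in service['createdAt'] becomes
+    # the JSON-safe string '2021-01-01 12:00:00'.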
+
+ def delete_service(self, service, cluster=None):
+ return self.ecs.delete_service(cluster=cluster, service=service)
+
+ def ecs_api_handles_network_configuration(self):
+ # There doesn't seem to be a nice way to inspect botocore to look
+ # for attributes (and networkConfiguration is not an explicit argument
+ # to e.g. ecs.run_task, it's just passed as a keyword argument)
+ return self.module.botocore_at_least('1.7.44')
+
+ def health_check_setable(self, params):
+ load_balancers = params.get('loadBalancers', [])
+ # check if botocore (and thus boto3) is new enough for using the healthCheckGracePeriodSeconds parameter
+ return len(load_balancers) > 0 and self.module.botocore_at_least('1.8.20')
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'deleting']),
+ name=dict(required=True, type='str'),
+ cluster=dict(required=False, type='str'),
+ task_definition=dict(required=False, type='str'),
+ load_balancers=dict(required=False, default=[], type='list', elements='dict'),
+ desired_count=dict(required=False, type='int'),
+ client_token=dict(required=False, default='', type='str'),
+ role=dict(required=False, default='', type='str'),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10),
+ force_new_deployment=dict(required=False, default=False, type='bool'),
+ deployment_configuration=dict(required=False, default={}, type='dict'),
+ placement_constraints=dict(
+ required=False,
+ default=[],
+ type='list',
+ elements='dict',
+ options=dict(
+ type=dict(type='str'),
+ expression=dict(type='str')
+ )
+ ),
+ placement_strategy=dict(
+ required=False,
+ default=[],
+ type='list',
+ elements='dict',
+ options=dict(
+ type=dict(type='str'),
+ field=dict(type='str'),
+ )
+ ),
+ health_check_grace_period_seconds=dict(required=False, type='int'),
+ network_configuration=dict(required=False, type='dict', options=dict(
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ assign_public_ip=dict(type='bool')
+ )),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ service_registries=dict(required=False, type='list', default=[], elements='dict'),
+ scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['task_definition']),
+ ('launch_type', 'FARGATE', ['network_configuration'])],
+ required_together=[['load_balancers', 'role']])
+
+ if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
+ if module.params['desired_count'] is None:
+ module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')
+
+ service_mgr = EcsServiceManager(module)
+ if module.params['network_configuration']:
+ if not service_mgr.ecs_api_handles_network_configuration():
+ module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
+ network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
+ else:
+ network_configuration = None
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
+ serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))
+
+ try:
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ except Exception as e:
+ module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))
+
+ results = dict(changed=False)
+
+ if module.params['launch_type']:
+ if not module.botocore_at_least('1.8.4'):
+ module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
+ if module.params['force_new_deployment']:
+ if not module.botocore_at_least('1.8.4'):
+ module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment')
+ if module.params['health_check_grace_period_seconds']:
+ if not module.botocore_at_least('1.8.20'):
+ module.fail_json(msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds')
+
+ if module.params['state'] == 'present':
+
+ matching = False
+ update = False
+
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
+ if module.params['force_new_deployment']:
+ update = True
+ elif service_mgr.is_matching_service(module.params, existing):
+ matching = True
+ results['service'] = existing
+ else:
+ update = True
+
+ if not matching:
+ if not module.check_mode:
+
+ role = module.params['role']
+ clientToken = module.params['client_token']
+
+                loadBalancers = []
+                for loadBalancer in module.params['load_balancers']:
+                    if 'containerPort' in loadBalancer:
+                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+                    loadBalancers.append(loadBalancer)
+
+ if update:
+                    # check various parameters and botocore versions and give a helpful error if botocore is not new enough for a feature
+
+ if module.params['scheduling_strategy']:
+ if not module.botocore_at_least('1.10.37'):
+ module.fail_json(msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy')
+ elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
+ module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
+
+ if module.params['service_registries']:
+ if not module.botocore_at_least('1.9.15'):
+ module.fail_json(msg='botocore needs to be version 1.9.15 or higher to use service_registries')
+ elif (existing['serviceRegistries'] or []) != serviceRegistries:
+ module.fail_json(msg="It is not possible to update the service registries of an existing service")
+
+ if (existing['loadBalancers'] or []) != loadBalancers:
+ module.fail_json(msg="It is not possible to update the load balancers of an existing service")
+
+ # update required
+ response = service_mgr.update_service(module.params['name'],
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['desired_count'],
+ deploymentConfiguration,
+ network_configuration,
+ module.params['health_check_grace_period_seconds'],
+ module.params['force_new_deployment'])
+
+ else:
+ try:
+ response = service_mgr.create_service(module.params['name'],
+ module.params['cluster'],
+ module.params['task_definition'],
+ loadBalancers,
+ module.params['desired_count'],
+ clientToken,
+ role,
+ deploymentConfiguration,
+ module.params['placement_constraints'],
+ module.params['placement_strategy'],
+ module.params['health_check_grace_period_seconds'],
+ network_configuration,
+ serviceRegistries,
+ module.params['launch_type'],
+ module.params['scheduling_strategy']
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Couldn't create service")
+
+ results['service'] = response
+
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+            # it exists, so we should delete it and mark changed.
+            # return info about the service deleted
+ del existing['deployments']
+ del existing['events']
+ results['ansible_facts'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ try:
+ service_mgr.delete_service(
+ module.params['name'],
+ module.params['cluster']
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Couldn't delete service")
+ results['changed'] = True
+
+ elif module.params['state'] == 'deleting':
+ if not existing:
+            module.fail_json(msg="Service '" + module.params['name'] + "' not found.")
+ return
+        # it exists, so we should delete it and mark changed.
+        # return info about the service deleted
+ delay = module.params['delay']
+ repeat = module.params['repeat']
+ time.sleep(delay)
+ for i in range(repeat):
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ status = existing['status']
+ if status == "INACTIVE":
+ results['changed'] = True
+ break
+ time.sleep(delay)
+            if i == repeat - 1:
+ module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+ return
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_facts.py
new file mode 100644
index 00000000..d428dde8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_facts.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_service_info
+version_added: 1.0.0
+short_description: List or describe services in ECS
+description:
+ - Lists or describes services in ECS.
+ - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.ecs_service_info) module no longer returns C(ansible_facts)!
+author:
+ - "Mark Chance (@Java1Guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, botocore, boto3 ]
+options:
+ details:
+ description:
+ - Set this to true if you want detailed information about the services.
+ required: false
+ default: false
+ type: bool
+ events:
+ description:
+ - Whether to return ECS service events. Only has an effect if I(details=true).
+ required: false
+ default: true
+ type: bool
+ cluster:
+ description:
+      - The cluster ARN in which to list the services.
+ required: false
+ type: str
+ service:
+ description:
+      - One or more services to get details for.
+ required: false
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ service: console-test-service
+ details: true
+ register: output
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ register: output
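+
+# Basic listing example with details but without service events
+# (an illustrative variation using the same hypothetical cluster as above)
+- community.aws.ecs_service_info:
+    cluster: test-cluster
+    details: true
+    events: false
+  register: output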
+'''
+
+RETURN = r'''
+services:
+ description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ clusterArn:
+            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+                    description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+            description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+                of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
+                arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ events:
+ description: list of service events
+ returned: when events is true
+ type: list
+ elements: dict
+''' # NOQA
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def list_services_with_backoff(self, **kwargs):
+ paginator = self.ecs.get_paginator('list_services')
+ try:
+ return paginator.paginate(**kwargs).build_full_result()
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'ClusterNotFoundException':
+ self.module.fail_json_aws(e, "Could not find cluster to list services")
+ else:
+ raise
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def describe_services_with_backoff(self, **kwargs):
+ return self.ecs.describe_services(**kwargs)
+
+ def list_services(self, cluster):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ try:
+ response = self.list_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list ECS services")
+ relevant_response = dict(services=response['serviceArns'])
+ return relevant_response
+
+ def describe_services(self, cluster, services):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ fn_args['services'] = services
+ try:
+ response = self.describe_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
+ running_services = [self.extract_service_from(service) for service in response.get('services', [])]
+ services_not_running = response.get('failures', [])
+ return running_services, services_not_running
+
+ def extract_service_from(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ if not self.module.params['events']:
+ del service['events']
+ else:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+
+def chunks(l, n):
+    """Yield successive n-sized chunks from l.
+
+    Adapted from https://stackoverflow.com/a/312464
+    """
+    for i in range(0, len(l), n):
+        yield l[i:i + n]
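+
+# Illustrative: list(chunks(['a', 'b', 'c'], 2)) yields [['a', 'b'], ['c']].
+# DescribeServices accepts at most 10 services per call, which is why main() below
+# walks the requested services in chunks of 10.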
+
+
+def main():
+
+ argument_spec = dict(
+ details=dict(type='bool', default=False),
+ events=dict(type='bool', default=True),
+ cluster=dict(),
+ service=dict(type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ is_old_facts = module._name == 'ecs_service_facts'
+ if is_old_facts:
+ module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ show_details = module.params.get('details')
+
+ task_mgr = EcsServiceManager(module)
+ if show_details:
+ if module.params['service']:
+ services = module.params['service']
+ else:
+ services = task_mgr.list_services(module.params['cluster'])['services']
+ ecs_info = dict(services=[], services_not_running=[])
+ for chunk in chunks(services, 10):
+ running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
+ ecs_info['services'].extend(running_services)
+ ecs_info['services_not_running'].extend(services_not_running)
+ else:
+ ecs_info = task_mgr.list_services(module.params['cluster'])
+
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=ecs_info, **ecs_info)
+ else:
+ module.exit_json(changed=False, **ecs_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
new file mode 100644
index 00000000..d428dde8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_service_info
+version_added: 1.0.0
+short_description: List or describe services in ECS
+description:
+ - Lists or describes services in ECS.
+ - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.ecs_service_info) module no longer returns C(ansible_facts)!
+author:
+ - "Mark Chance (@Java1Guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, botocore, boto3 ]
+options:
+ details:
+ description:
+ - Set this to true if you want detailed information about the services.
+ required: false
+ default: false
+ type: bool
+ events:
+ description:
+ - Whether to return ECS service events. Only has an effect if I(details=true).
+ required: false
+ default: true
+ type: bool
+ cluster:
+ description:
+      - The cluster ARN in which to list the services.
+ required: false
+ type: str
+ service:
+ description:
+      - One or more services to get details for.
+ required: false
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ service: console-test-service
+ details: true
+ register: output
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ register: output
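+
+# Basic listing example with details but without service events
+# (an illustrative variation using the same hypothetical cluster as above)
+- community.aws.ecs_service_info:
+    cluster: test-cluster
+    details: true
+    events: false
+  register: output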
+'''
+
+RETURN = r'''
+services:
+ description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ clusterArn:
+            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+                    description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+            description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+                of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
+                arn:aws:ecs:region:012345678910:service/my-service.
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ events:
+ description: list of service events
+ returned: when events is true
+ type: list
+ elements: dict
+''' # NOQA
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def list_services_with_backoff(self, **kwargs):
+ paginator = self.ecs.get_paginator('list_services')
+ try:
+ return paginator.paginate(**kwargs).build_full_result()
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'ClusterNotFoundException':
+ self.module.fail_json_aws(e, "Could not find cluster to list services")
+ else:
+ raise
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+ def describe_services_with_backoff(self, **kwargs):
+ return self.ecs.describe_services(**kwargs)
+
+ def list_services(self, cluster):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ try:
+ response = self.list_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list ECS services")
+ relevant_response = dict(services=response['serviceArns'])
+ return relevant_response
+
+ def describe_services(self, cluster, services):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ fn_args['services'] = services
+ try:
+ response = self.describe_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
+ running_services = [self.extract_service_from(service) for service in response.get('services', [])]
+ services_not_running = response.get('failures', [])
+ return running_services, services_not_running
+
+ def extract_service_from(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ if not self.module.params['events']:
+ del service['events']
+ else:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+
+def chunks(l, n):
+    """Yield successive n-sized chunks from l.
+
+    Adapted from https://stackoverflow.com/a/312464
+    """
+    for i in range(0, len(l), n):
+        yield l[i:i + n]
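+
+# Illustrative: list(chunks(['a', 'b', 'c'], 2)) yields [['a', 'b'], ['c']].
+# DescribeServices accepts at most 10 services per call, which is why main() below
+# walks the requested services in chunks of 10.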
+
+
+def main():
+
+ argument_spec = dict(
+ details=dict(type='bool', default=False),
+ events=dict(type='bool', default=True),
+ cluster=dict(),
+ service=dict(type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ is_old_facts = module._name == 'ecs_service_facts'
+ if is_old_facts:
+ module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ show_details = module.params.get('details')
+
+ task_mgr = EcsServiceManager(module)
+ if show_details:
+ if module.params['service']:
+ services = module.params['service']
+ else:
+ services = task_mgr.list_services(module.params['cluster'])['services']
+ ecs_info = dict(services=[], services_not_running=[])
+ for chunk in chunks(services, 10):
+ running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
+ ecs_info['services'].extend(running_services)
+ ecs_info['services_not_running'].extend(services_not_running)
+ else:
+ ecs_info = task_mgr.list_services(module.params['cluster'])
+
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=ecs_info, **ecs_info)
+ else:
+ module.exit_json(changed=False, **ecs_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_tag.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_tag.py
new file mode 100644
index 00000000..9e4f9798
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_tag.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Michael Pechner <mikey@mikey.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_tag
+version_added: 1.0.0
+short_description: create and remove tags on Amazon ECS resources
+description:
+ - Creates and removes tags for Amazon ECS resources.
+ - Resources are referenced by their cluster name.
+author:
+ - Michael Pechner (@mpechner)
+requirements: [ boto3, botocore ]
+options:
+ cluster_name:
+ description:
+ - The name of the cluster whose resources we are tagging.
+ required: true
+ type: str
+ resource:
+ description:
+ - The ECS resource name.
+ - Required unless I(resource_type=cluster).
+ type: str
+ resource_type:
+ description:
+ - The type of resource.
+ default: cluster
+ choices: ['cluster', 'task', 'service', 'task_definition', 'container']
+ type: str
+ state:
+ description:
+ - Whether the tags should be present or absent on the resource.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+ - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value.
+ type: dict
+ purge_tags:
+ description:
+ - Whether unspecified tags should be removed from the resource.
+ - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure tags are present on a resource
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ state: present
+ tags:
+ Name: ubervol
+ env: prod
+
+- name: Remove the Env tag
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Env:
+ state: absent
+
+- name: Remove the Env tag if it's currently 'development'
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Env: development
+ state: absent
+
+- name: Remove all tags except for Name from a cluster
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Name: foo
+ state: absent
+ purge_tags: true
+'''
+
+RETURN = r'''
+tags:
+  description: A dict containing the tags on the resource.
+  returned: always
+  type: dict
+added_tags:
+  description: A dict of tags that were added to the resource.
+  returned: when tags were added
+  type: dict
+removed_tags:
+  description: A dict of tags that were removed from the resource.
+  returned: when tags were removed
+  type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def get_tags(ecs, module, resource):
+ try:
+ return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+
+
+def get_arn(ecs, module, cluster_name, resource_type, resource):
+
+ try:
+ if resource_type == 'cluster':
+ description = ecs.describe_clusters(clusters=[resource])
+ resource_arn = description['clusters'][0]['clusterArn']
+ elif resource_type == 'task':
+ description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
+ resource_arn = description['tasks'][0]['taskArn']
+ elif resource_type == 'service':
+ description = ecs.describe_services(cluster=cluster_name, services=[resource])
+ resource_arn = description['services'][0]['serviceArn']
+ elif resource_type == 'task_definition':
+ description = ecs.describe_task_definition(taskDefinition=resource)
+ resource_arn = description['taskDefinition']['taskDefinitionArn']
+        elif resource_type == 'container':
+            description = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[resource])
+            resource_arn = description['containerInstances'][0]['containerInstanceArn']
+ except (IndexError, KeyError):
+ module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
+
+ return resource_arn
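+
+# Illustrative (hypothetical names): get_arn(ecs, module, 'mycluster', 'service', 'my-service')
+# resolves to an ARN of the form 'arn:aws:ecs:us-east-1:123456789012:service/my-service'.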
+
+
+def main():
+ argument_spec = dict(
+ cluster_name=dict(required=True),
+ resource=dict(required=False),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
+ )
+ required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
+
+ resource_type = module.params['resource_type']
+ cluster_name = module.params['cluster_name']
+ if resource_type == 'cluster':
+ resource = cluster_name
+ else:
+ resource = module.params['resource']
+ tags = module.params['tags']
+ state = module.params['state']
+ purge_tags = module.params['purge_tags']
+
+ result = {'changed': False}
+
+ ecs = module.client('ecs')
+
+ resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)
+
+ current_tags = get_tags(ecs, module, resource_arn)
+
+ add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
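+    # Illustrative semantics of compare_aws_tags (assumed example values): with
+    # current_tags={'Name': 'foo', 'Env': 'dev'}, tags={'Env': 'prod'} and purge_tags=True,
+    # add_tags is {'Env': 'prod'} and remove is ['Name'] (keys to purge from the resource).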
+
+ remove_tags = {}
+ if state == 'absent':
+ for key in tags:
+ if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
+ remove_tags[key] = current_tags[key]
+
+ for key in remove:
+ remove_tags[key] = current_tags[key]
+
+ if remove_tags:
+ result['changed'] = True
+ result['removed_tags'] = remove_tags
+ if not module.check_mode:
+ try:
+ ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+
+ if state == 'present' and add_tags:
+ result['changed'] = True
+ result['added_tags'] = add_tags
+ current_tags.update(add_tags)
+ if not module.check_mode:
+ try:
+ tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
+ ecs.tag_resource(resourceArn=resource_arn, tags=tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+
+ result['tags'] = get_tags(ecs, module, resource_arn)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_task.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_task.py
new file mode 100644
index 00000000..f43cd700
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_task.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_task
+version_added: 1.0.0
+short_description: Run, start or stop a task in ecs
+description:
+ - Creates or deletes instances of task definitions.
+author: Mark Chance (@Java1Guy)
+requirements: [ json, botocore, boto3 ]
+options:
+ operation:
+ description:
+ - Which task operation to execute.
+ required: True
+ choices: ['run', 'start', 'stop']
+ type: str
+ cluster:
+ description:
+ - The name of the cluster to run the task on.
+ required: False
+ type: str
+ task_definition:
+ description:
+ - The task definition to start or run.
+ required: False
+ type: str
+ overrides:
+ description:
+ - A dictionary of values to pass to the new instances.
+ required: False
+ type: dict
+ count:
+ description:
+ - How many new instances to start.
+ required: False
+ type: int
+ task:
+ description:
+ - The task to stop.
+ required: False
+ type: str
+ container_instances:
+ description:
+ - The list of container instances on which to deploy the task.
+ required: False
+ type: list
+ elements: str
+ started_by:
+ description:
+ - A value showing who or what started the task (for informational purposes).
+ required: False
+ type: str
+ network_configuration:
+ description:
+ - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
+ type: dict
+ suboptions:
+ subnets:
+ description: A list of subnet IDs to which the task is attached.
+ type: list
+ elements: str
+ security_groups:
+ description: A list of group names or group IDs for the task.
+ type: list
+ elements: str
+ launch_type:
+ description:
+ - The launch type on which to run your service.
+ required: false
+ choices: ["EC2", "FARGATE"]
+ type: str
+ tags:
+ type: dict
+ description:
+ - Tags that will be added to ecs tasks on start and run
+ required: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of run task
+- name: Run task
+ community.aws.ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ count: 1
+ started_by: ansible_user
+ register: task_output
+
+# Simple example of start task
+
+- name: Start a task
+ community.aws.ecs_task:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ tags:
+ resourceName: a_task_for_ansible_to_run
+ type: long_running_task
+ network: internal
+ version: 1.4
+ container_instances:
+ - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
+ started_by: ansible_user
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+ register: task_output
+
+- name: RUN a task on Fargate
+ community.aws.ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+ register: task_output
+
+- name: Stop a task
+ community.aws.ecs_task:
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+'''
+RETURN = r'''
+task:
+ description: details about the task that was started
+ returned: success
+ type: complex
+ contains:
+ taskArn:
+ description: The Amazon Resource Name (ARN) that identifies the task.
+ returned: always
+ type: str
+ clusterArn:
+            description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
+ returned: only when details is true
+ type: str
+ taskDefinitionArn:
+ description: The Amazon Resource Name (ARN) of the task definition.
+ returned: only when details is true
+ type: str
+ containerInstanceArn:
+ description: The Amazon Resource Name (ARN) of the container running the task.
+ returned: only when details is true
+ type: str
+ overrides:
+ description: The container overrides set for this task.
+ returned: only when details is true
+ type: list
+ elements: dict
+ lastStatus:
+ description: The last recorded status of the task.
+ returned: only when details is true
+ type: str
+ desiredStatus:
+ description: The desired status of the task.
+ returned: only when details is true
+ type: str
+ containers:
+ description: The container details.
+ returned: only when details is true
+ type: list
+ elements: dict
+ startedBy:
+            description: The user who started the task.
+ returned: only when details is true
+ type: str
+ stoppedReason:
+ description: The reason why the task was stopped.
+ returned: only when details is true
+ type: str
+ createdAt:
+ description: The timestamp of when the task was created.
+ returned: only when details is true
+ type: str
+ startedAt:
+ description: The timestamp of when the task was started.
+ returned: only when details is true
+ type: str
+ stoppedAt:
+ description: The timestamp of when the task was stopped.
+ returned: only when details is true
+ type: str
+ launchType:
+ description: The launch type on which to run your task.
+ returned: always
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils.basic import missing_required_lib
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+class EcsExecManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+ self.ec2 = module.client('ec2')
+
+ def format_network_configuration(self, network_config):
+ result = dict()
+ if 'subnets' in network_config:
+ result['subnets'] = network_config['subnets']
+ else:
+ self.module.fail_json(msg="Network configuration must include subnets")
+ if 'security_groups' in network_config:
+ groups = network_config['security_groups']
+ if any(not sg.startswith('sg-') for sg in groups):
+ try:
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't look up security groups")
+ result['securityGroups'] = groups
+ return dict(awsvpcConfiguration=result)
+
+ def list_tasks(self, cluster_name, service_name, status):
+ response = self.ecs.list_tasks(
+ cluster=cluster_name,
+ family=service_name,
+ desiredStatus=status
+ )
+ if len(response['taskArns']) > 0:
+ for c in response['taskArns']:
+ if c.endswith(service_name):
+ return c
+ return None
+
+ def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
+ if overrides is None:
+ overrides = dict()
+ params = dict(cluster=cluster, taskDefinition=task_definition,
+ overrides=overrides, count=count, startedBy=startedBy)
+ if self.module.params['network_configuration']:
+ params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ if launch_type:
+ params['launchType'] = launch_type
+ if tags:
+ params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
+ # TODO: need to check if long arn format enabled.
+ try:
+ response = self.ecs.run_task(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't run task")
+ # include tasks and failures
+ return response['tasks']
+
+ def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
+ args = dict()
+ if cluster:
+ args['cluster'] = cluster
+ if task_definition:
+ args['taskDefinition'] = task_definition
+ if overrides:
+ args['overrides'] = overrides
+ if container_instances:
+ args['containerInstances'] = container_instances
+ if startedBy:
+ args['startedBy'] = startedBy
+ if self.module.params['network_configuration']:
+ args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ if tags:
+ args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ try:
+ response = self.ecs.start_task(**args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't start task")
+ # include tasks and failures
+ return response['tasks']
+
+ def stop_task(self, cluster, task):
+ response = self.ecs.stop_task(cluster=cluster, task=task)
+ return response['task']
+
+    def ecs_api_handles_launch_type(self):
+        from distutils.version import LooseVersion
+        # There doesn't seem to be a nice way to inspect botocore to look
+        # for attributes (and launchType is not an explicit argument
+        # to e.g. ecs.run_task, it's just passed as a keyword argument)
+        return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4')
+
+ def ecs_task_long_format_enabled(self):
+ account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
+ return account_support['settings'][0]['value'] == 'enabled'
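+
+    # Illustrative response shape for the check above (values assumed):
+    # list_account_settings returns {'settings': [{'name': 'taskLongArnFormat',
+    # 'value': 'enabled', ...}]}; task tags require the long ARN format.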
+
+    def ecs_api_handles_tags(self):
+        from distutils.version import LooseVersion
+        # There doesn't seem to be a nice way to inspect botocore to look
+        # for attributes (and tags is not an explicit argument
+        # to e.g. ecs.run_task, it's just passed as a keyword argument)
+        return LooseVersion(botocore.__version__) >= LooseVersion('1.12.46')
+
+ def ecs_api_handles_network_configuration(self):
+ from distutils.version import LooseVersion
+ # There doesn't seem to be a nice way to inspect botocore to look
+ # for attributes (and networkConfiguration is not an explicit argument
+ # to e.g. ecs.run_task, it's just passed as a keyword argument)
+ return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
+
+
+def main():
+ argument_spec = dict(
+ operation=dict(required=True, choices=['run', 'start', 'stop']),
+ cluster=dict(required=False, type='str'), # R S P
+ task_definition=dict(required=False, type='str'), # R* S*
+ overrides=dict(required=False, type='dict'), # R S
+ count=dict(required=False, type='int'), # R
+ task=dict(required=False, type='str'), # P*
+ container_instances=dict(required=False, type='list', elements='str'), # S*
+ started_by=dict(required=False, type='str'), # R S
+ network_configuration=dict(required=False, type='dict'),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ tags=dict(required=False, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
+ required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
+
+    # Validate Inputs. Note that argument_spec guarantees every key exists in
+    # module.params, so only the value needs checking.
+    if module.params['operation'] == 'run':
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To run a task, a task_definition must be specified")
+        task_to_list = module.params['task_definition']
+        status_type = "RUNNING"
+
+    if module.params['operation'] == 'start':
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To start a task, a task_definition must be specified")
+        if module.params['container_instances'] is None:
+            module.fail_json(msg="To start a task, container instances must be specified")
+        task_to_list = module.params['task']
+        status_type = "RUNNING"
+
+    if module.params['operation'] == 'stop':
+        if module.params['task'] is None:
+            module.fail_json(msg="To stop a task, a task must be specified")
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To stop a task, a task definition must be specified")
+        task_to_list = module.params['task_definition']
+        status_type = "STOPPED"
+
+ service_mgr = EcsExecManager(module)
+
+ if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
+ module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
+
+ if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
+ module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
+
+ if module.params['tags']:
+ if not service_mgr.ecs_api_handles_tags():
+ module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
+ if not service_mgr.ecs_task_long_format_enabled():
+ module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
+
+ existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+
+ results = dict(changed=False)
+ if module.params['operation'] == 'run':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task'] = existing
+ else:
+ if not module.check_mode:
+ results['task'] = service_mgr.run_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['count'],
+ module.params['started_by'],
+ module.params['launch_type'],
+ module.params['tags'],
+ )
+ results['changed'] = True
+
+ elif module.params['operation'] == 'start':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task'] = existing
+ else:
+ if not module.check_mode:
+ results['task'] = service_mgr.start_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['container_instances'],
+ module.params['started_by'],
+ module.params['tags'],
+ )
+ results['changed'] = True
+
+ elif module.params['operation'] == 'stop':
+ if existing:
+ results['task'] = existing
+        else:
+            if not module.check_mode:
+                # no already-stopped task was found, so stop it and mark it changed;
+                # return info about the stopped task
+ results['task'] = service_mgr.stop_task(
+ module.params['cluster'],
+ module.params['task']
+ )
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
new file mode 100644
index 00000000..6158fb4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_taskdefinition
+version_added: 1.0.0
+short_description: register a task definition in ecs
+description:
+ - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS).
+author: Mark Chance (@Java1Guy)
+requirements: [ json, botocore, boto3 ]
+options:
+ state:
+ description:
+ - State whether the task definition should exist or be deleted.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ arn:
+ description:
+      - The ARN of the task definition to delete.
+ required: false
+ type: str
+ family:
+ description:
+      - A name that will be given to the task definition.
+ required: false
+ type: str
+ revision:
+ description:
+ - A revision number for the task definition.
+ required: False
+ type: int
+ force_create:
+ description:
+ - Always create new task definition.
+ required: False
+ type: bool
+ default: false
+ containers:
+ description:
+      - A list of container definitions.
+ required: False
+ type: list
+ elements: dict
+ network_mode:
+ description:
+ - The Docker networking mode to use for the containers in the task.
+      - C(awsvpc) mode was added in Ansible 2.5.
+ - Windows containers must use I(network_mode=default), which will utilize docker NAT networking.
+ - Setting I(network_mode=default) for a Linux container will use bridge mode.
+ required: false
+ default: bridge
+ choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ]
+ type: str
+ task_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
+ the permissions that are specified in this role.
+ required: false
+ type: str
+ execution_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.
+ required: false
+ type: str
+ volumes:
+ description:
+ - A list of names of volumes to be attached.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description: The name of the volume.
+ required: true
+ launch_type:
+ description:
+ - The launch type on which to run your task.
+ required: false
+ type: str
+ choices: ["EC2", "FARGATE"]
+ cpu:
+ description:
+ - The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used.
+ - If using the Fargate launch type, this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096).
+ required: false
+ type: str
+ memory:
+ description:
+ - The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used.
+ - If using the Fargate launch type, this field is required and is limited by the CPU.
+ required: false
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ containers:
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ logConfiguration:
+ logDriver: awslogs
+ options:
+ awslogs-group: /ecs/test-cluster-taskdef
+ awslogs-region: us-west-2
+ awslogs-stream-prefix: ecs
+ - name: busybox
+ command:
+ - >
+ /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
+ </h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
+ cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
+ volumes:
+ - name: my-vol
+ family: test-cluster-taskdef
+ state: present
+  register: task_output
+
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ state: present
+
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1024
+ state: present
+ network_mode: awsvpc
+
+# Create Task Definition with Environment Variables and Secrets
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ environment:
+ - name: "PORT"
+ value: "8080"
+ secrets:
+ # For variables stored in Secrets Manager
+ - name: "NGINX_HOST"
+ valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
+ # For variables stored in Parameter Store
+ - name: "API_KEY"
+ valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1GB
+ state: present
+ network_mode: awsvpc
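+
+# Deregister a task definition revision. For state 'absent', either the 'arn'
+# option or a family plus revision identifies the definition; a minimal sketch:
+- name: Remove task definition
+  community.aws.ecs_taskdefinition:
+    family: nginx
+    revision: 1
+    state: absent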
+'''
+RETURN = r'''
+taskdefinition:
+  description: the created, matched, or deregistered task definition
+ type: dict
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils._text import to_text
+
+
+class EcsTaskManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+
+ self.ecs = module.client('ecs')
+
+ def describe_task(self, task_name):
+ try:
+ response = self.ecs.describe_task_definition(taskDefinition=task_name)
+ return response['taskDefinition']
+ except botocore.exceptions.ClientError:
+ return None
+
+ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory):
+ validated_containers = []
+
+        # Ensure numeric parameters are ints, as required by boto3
+ for container in container_definitions:
+ for param in ('memory', 'cpu', 'memoryReservation'):
+ if param in container:
+ container[param] = int(container[param])
+
+ if 'portMappings' in container:
+ for port_mapping in container['portMappings']:
+ for port in ('hostPort', 'containerPort'):
+ if port in port_mapping:
+ port_mapping[port] = int(port_mapping[port])
+ if network_mode == 'awsvpc' and 'hostPort' in port_mapping:
+ if port_mapping['hostPort'] != port_mapping.get('containerPort'):
+ self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as "
+ "container port or not be set")
+
+ validated_containers.append(container)
+
+ params = dict(
+ family=family,
+ taskRoleArn=task_role_arn,
+            containerDefinitions=validated_containers,
+ volumes=volumes
+ )
+ if network_mode != 'default':
+ params['networkMode'] = network_mode
+ if cpu:
+ params['cpu'] = cpu
+ if memory:
+ params['memory'] = memory
+ if launch_type:
+ params['requiresCompatibilities'] = [launch_type]
+ if execution_role_arn:
+ params['executionRoleArn'] = execution_role_arn
+
+ try:
+ response = self.ecs.register_task_definition(**params)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to register task")
+
+ return response['taskDefinition']
+
+ def describe_task_definitions(self, family):
+ data = {
+ "taskDefinitionArns": [],
+ "nextToken": None
+ }
+
+ def fetch():
+            # boto3 rejects parameters set to None, so only pass nextToken when it has a value
+ params = {
+ 'familyPrefix': family
+ }
+
+ if data['nextToken']:
+ params['nextToken'] = data['nextToken']
+
+ result = self.ecs.list_task_definitions(**params)
+ data['taskDefinitionArns'] += result['taskDefinitionArns']
+ data['nextToken'] = result.get('nextToken', None)
+ return data['nextToken'] is not None
+
+ # Fetch all the arns, possibly across multiple pages
+ while fetch():
+ pass
+
+ # Return the full descriptions of the task definitions, sorted ascending by revision
+ return list(
+ sorted(
+ [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
+ key=lambda td: td['revision']
+ )
+ )
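+
+    # The manual pagination above could also be written with boto3's built-in
+    # paginator (a sketch, assuming the standard 'list_task_definitions'
+    # paginator that boto3 generates for ECS):
+    #
+    #   paginator = self.ecs.get_paginator('list_task_definitions')
+    #   arns = []
+    #   for page in paginator.paginate(familyPrefix=family):
+    #       arns.extend(page['taskDefinitionArns'])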
+
+ def deregister_task(self, taskArn):
+ response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
+ return response['taskDefinition']
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ arn=dict(required=False, type='str'),
+ family=dict(required=False, type='str'),
+ revision=dict(required=False, type='int'),
+ force_create=dict(required=False, default=False, type='bool'),
+ containers=dict(required=False, type='list', elements='dict'),
+ network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
+ task_role_arn=dict(required=False, default='', type='str'),
+ execution_role_arn=dict(required=False, default='', type='str'),
+ volumes=dict(required=False, type='list', elements='dict'),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+        cpu=dict(required=False, type='str'),
+ memory=dict(required=False, type='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
+ )
+
+ task_to_describe = None
+ task_mgr = EcsTaskManager(module)
+ results = dict(changed=False)
+
+ if module.params['launch_type']:
+ if not module.botocore_at_least('1.8.4'):
+ module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
+
+ if module.params['execution_role_arn']:
+ if not module.botocore_at_least('1.10.44'):
+ module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn')
+
+ if module.params['containers']:
+ for container in module.params['containers']:
+ for environment in container.get('environment', []):
+ environment['value'] = to_text(environment['value'])
+
+ if module.params['state'] == 'present':
+        if not module.params['containers']:
+            module.fail_json(msg="To use task definitions, a list of containers must be specified")
+
+        if not module.params['family']:
+            module.fail_json(msg="To use task definitions, a family must be specified")
+
+ network_mode = module.params['network_mode']
+ launch_type = module.params['launch_type']
+ if launch_type == 'FARGATE' and network_mode != 'awsvpc':
+ module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
+
+ family = module.params['family']
+ existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
+
+        if module.params['revision']:
+ # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
+ revision = int(module.params['revision'])
+
+ # A revision has been explicitly specified. Attempt to locate a matching revision
+ tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
+ existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
+
+ if existing and existing['status'] != "ACTIVE":
+ # We cannot reactivate an inactive revision
+ module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
+ elif not existing:
+ if not existing_definitions_in_family and revision != 1:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
+ elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
+ (revision, existing_definitions_in_family[-1]['revision'] + 1))
+ else:
+ existing = None
+
+ def _right_has_values_of_left(left, right):
+ # Make sure the values are equivalent for everything left has
+ for k, v in left.items():
+ if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
+ # We don't care about list ordering because ECS can change things
+ if isinstance(v, list) and k in right:
+ left_list = v
+ right_list = right[k] or []
+
+ if len(left_list) != len(right_list):
+ return False
+
+ for list_val in left_list:
+ if list_val not in right_list:
+ return False
+ else:
+ return False
+
+ # Make sure right doesn't have anything that left doesn't
+ for k, v in right.items():
+ if v and k not in left:
+ return False
+
+ return True
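+
+        # Illustrative semantics of _right_has_values_of_left:
+        #   _right_has_values_of_left({'image': 'nginx'}, {'image': 'nginx', 'cpu': 0}) -> True
+        #     (falsy values on the right count as absent)
+        #   _right_has_values_of_left({'cpu': 10}, {'image': 'nginx'}) -> False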
+
+ def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition):
+            if existing_task_definition['status'] != "ACTIVE":
+                return None
+
+            if requested_task_role_arn != existing_task_definition.get('taskRoleArn', ""):
+                return None
+
+            existing_volumes = existing_task_definition.get('volumes', []) or []
+
+ if len(requested_volumes) != len(existing_volumes):
+ # Nope.
+ return None
+
+ if len(requested_volumes) > 0:
+ for requested_vol in requested_volumes:
+ found = False
+
+ for actual_vol in existing_volumes:
+ if _right_has_values_of_left(requested_vol, actual_vol):
+ found = True
+ break
+
+ if not found:
+ return None
+
+            existing_containers = existing_task_definition.get('containerDefinitions', []) or []
+
+ if len(requested_containers) != len(existing_containers):
+ # Nope.
+ return None
+
+ for requested_container in requested_containers:
+ found = False
+
+ for actual_container in existing_containers:
+ if _right_has_values_of_left(requested_container, actual_container):
+ found = True
+ break
+
+ if not found:
+ return None
+
+ return existing_task_definition
+
+ # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
+ for td in existing_definitions_in_family:
+ requested_volumes = module.params['volumes'] or []
+ requested_containers = module.params['containers'] or []
+ requested_task_role_arn = module.params['task_role_arn']
+ existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td)
+
+ if existing:
+ break
+
+ if existing and not module.params.get('force_create'):
+ # Awesome. Have an existing one. Nothing to do.
+ results['taskdefinition'] = existing
+ else:
+ if not module.check_mode:
+ # Doesn't exist. create it.
+ volumes = module.params.get('volumes', []) or []
+ results['taskdefinition'] = task_mgr.register_task(module.params['family'],
+ module.params['task_role_arn'],
+ module.params['execution_role_arn'],
+ module.params['network_mode'],
+ module.params['containers'],
+ volumes,
+ module.params['launch_type'],
+ module.params['cpu'],
+ module.params['memory'])
+ results['changed'] = True
+
+    elif module.params['state'] == 'absent':
+        # When deregistering a task definition, we can specify either the ARN or the family and revision.
+        if module.params['arn'] is not None:
+            task_to_describe = module.params['arn']
+        elif module.params['family'] is not None and module.params['revision'] is not None:
+            task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
+        else:
+            module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
+
+ existing = task_mgr.describe_task(task_to_describe)
+
+        if existing:
+            # It exists, so deregister it and mark changed. Return info about the task definition removed.
+            results['taskdefinition'] = existing
+            if existing.get('status') == "INACTIVE":
+                results['changed'] = False
+            else:
+                if not module.check_mode:
+                    task_mgr.deregister_task(task_to_describe)
+                results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_facts.py
new file mode 100644
index 00000000..ef5b20c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_facts.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition_info
+version_added: 1.0.0
+short_description: Describe a task definition in ECS
+notes:
+ - For details of the parameters and returns see
+ U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
+ - This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change.
+description:
+ - Describes a task definition in ECS.
+author:
+ - Gustavo Maia (@gurumaia)
+ - Mark Chance (@Java1Guy)
+ - Darek Kaczynski (@kaczynskid)
+requirements: [ json, botocore, boto3 ]
+options:
+ task_definition:
+ description:
+      - The name of the task definition to get details for.
+ required: true
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.ecs_taskdefinition_info:
+ task_definition: test-td
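+
+# The task definition can also be addressed as family:revision or by its full
+# ARN, matching what the underlying describe_task_definition API accepts:
+- community.aws.ecs_taskdefinition_info:
+    task_definition: test-td:3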
+'''
+
+RETURN = '''
+container_definitions:
+ description: Returns a list of complex objects representing the containers
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of a container.
+ returned: always
+ type: str
+ image:
+ description: The image used to start a container.
+ returned: always
+ type: str
+ cpu:
+ description: The number of cpu units reserved for the container.
+ returned: always
+ type: int
+ memoryReservation:
+ description: The soft limit (in MiB) of memory to reserve for the container.
+ returned: when present
+ type: int
+ links:
+ description: Links to other containers.
+ returned: when present
+ type: str
+ portMappings:
+ description: The list of port mappings for the container.
+ returned: always
+ type: complex
+ contains:
+ containerPort:
+ description: The port number on the container.
+ returned: when present
+ type: int
+ hostPort:
+ description: The port number on the container instance to reserve for your container.
+ returned: when present
+ type: int
+ protocol:
+ description: The protocol used for the port mapping.
+ returned: when present
+ type: str
+ essential:
+ description: Whether this is an essential container or not.
+ returned: always
+ type: bool
+ entryPoint:
+ description: The entry point that is passed to the container.
+ returned: when present
+ type: str
+ command:
+ description: The command that is passed to the container.
+ returned: when present
+ type: str
+ environment:
+ description: The environment variables to pass to a container.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the environment variable.
+ returned: when present
+ type: str
+ value:
+ description: The value of the environment variable.
+ returned: when present
+ type: str
+ mountPoints:
+ description: The mount points for data volumes in your container.
+ returned: always
+ type: complex
+ contains:
+ sourceVolume:
+ description: The name of the volume to mount.
+ returned: when present
+ type: str
+ containerPath:
+ description: The path on the container to mount the host volume at.
+ returned: when present
+ type: str
+ readOnly:
+          description: If this value is true, the container has read-only access to the volume.
+            If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ volumesFrom:
+ description: Data volumes to mount from another container.
+ returned: always
+ type: complex
+ contains:
+ sourceContainer:
+ description: The name of another container within the same task definition to mount volumes from.
+ returned: when present
+ type: str
+ readOnly:
+          description: If this value is true, the container has read-only access to the volume.
+            If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ hostname:
+ description: The hostname to use for your container.
+ returned: when present
+ type: str
+ user:
+ description: The user name to use inside the container.
+ returned: when present
+ type: str
+ workingDirectory:
+ description: The working directory in which to run commands inside the container.
+ returned: when present
+ type: str
+ disableNetworking:
+ description: When this parameter is true, networking is disabled within the container.
+ returned: when present
+ type: bool
+ privileged:
+ description: When this parameter is true, the container is given elevated
+ privileges on the host container instance (similar to the root user).
+ returned: when present
+ type: bool
+ readonlyRootFilesystem:
+ description: When this parameter is true, the container is given read-only access to its root file system.
+ returned: when present
+ type: bool
+ dnsServers:
+ description: A list of DNS servers that are presented to the container.
+ returned: when present
+ type: str
+ dnsSearchDomains:
+ description: A list of DNS search domains that are presented to the container.
+ returned: when present
+ type: str
+ extraHosts:
+ description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
+ returned: when present
+ type: complex
+ contains:
+ hostname:
+ description: The hostname to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ ipAddress:
+ description: The IP address to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ dockerSecurityOptions:
+ description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
+ returned: when present
+ type: str
+ dockerLabels:
+ description: A key/value map of labels to add to the container.
+ returned: when present
+ type: str
+ ulimits:
+ description: A list of ulimits to set in the container.
+ returned: when present
+ type: complex
+ contains:
+ name:
+          description: The type of the ulimit.
+ returned: when present
+ type: str
+ softLimit:
+ description: The soft limit for the ulimit type.
+ returned: when present
+ type: int
+ hardLimit:
+ description: The hard limit for the ulimit type.
+ returned: when present
+ type: int
+ logConfiguration:
+ description: The log configuration specification for the container.
+ returned: when present
+ type: str
+ options:
+ description: The configuration options to send to the log driver.
+ returned: when present
+ type: str
+
+family:
+ description: The family of your task definition, used as the definition name
+ returned: always
+ type: str
+task_definition_arn:
+ description: ARN of the task definition
+ returned: always
+ type: str
+task_role_arn:
+ description: The ARN of the IAM role that containers in this task can assume
+ returned: when role is set
+ type: str
+network_mode:
+ description: Network mode for the containers
+ returned: always
+ type: str
+revision:
+ description: Revision number that was queried
+ returned: always
+ type: int
+volumes:
+ description: The list of volumes in a task
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the volume.
+ returned: when present
+ type: str
+ host:
+ description: The contents of the host parameter determine whether your data volume
+ persists on the host container instance and where it is stored.
+ returned: when present
+ type: bool
+ source_path:
+ description: The path on the host container instance that is presented to the container.
+ returned: when present
+ type: str
+status:
+ description: The status of the task definition
+ returned: always
+ type: str
+requires_attributes:
+ description: The container instance attributes required by your task
+ returned: when present
+ type: complex
+ contains:
+ name:
+ description: The name of the attribute.
+ returned: when present
+ type: str
+ value:
+ description: The value of the attribute.
+ returned: when present
+ type: str
+ targetType:
+ description: The type of the target with which to attach the attribute.
+ returned: when present
+ type: str
+ targetId:
+ description: The ID of the target.
+ returned: when present
+ type: str
+placement_constraints:
+ description: A list of placement constraint objects to use for tasks
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of constraint.
+ returned: when present
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ returned: when present
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ task_definition=dict(required=True, type='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ecs_taskdefinition_facts':
+ module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ ecs = module.client('ecs')
+
+ try:
+ ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
+ except botocore.exceptions.ClientError:
+ ecs_td = {}
+
+ module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
new file mode 100644
index 00000000..ef5b20c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition_info
+version_added: 1.0.0
+short_description: Describe a task definition in ECS
+notes:
+ - For details of the parameters and returns see
+ U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
+ - This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change.
+description:
+ - Describes a task definition in ECS.
+author:
+ - Gustavo Maia (@gurumaia)
+ - Mark Chance (@Java1Guy)
+ - Darek Kaczynski (@kaczynskid)
+requirements: [ json, botocore, boto3 ]
+options:
+ task_definition:
+ description:
+      - The name of the task definition to get details for.
+ required: true
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.ecs_taskdefinition_info:
+ task_definition: test-td
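+
+# The task definition can also be addressed as family:revision or by its full
+# ARN, matching what the underlying describe_task_definition API accepts:
+- community.aws.ecs_taskdefinition_info:
+    task_definition: test-td:3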
+'''
+
+RETURN = '''
+container_definitions:
+ description: Returns a list of complex objects representing the containers
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of a container.
+ returned: always
+ type: str
+ image:
+ description: The image used to start a container.
+ returned: always
+ type: str
+ cpu:
+ description: The number of cpu units reserved for the container.
+ returned: always
+ type: int
+ memoryReservation:
+ description: The soft limit (in MiB) of memory to reserve for the container.
+ returned: when present
+ type: int
+ links:
+ description: Links to other containers.
+ returned: when present
+ type: str
+ portMappings:
+ description: The list of port mappings for the container.
+ returned: always
+ type: complex
+ contains:
+ containerPort:
+ description: The port number on the container.
+ returned: when present
+ type: int
+ hostPort:
+ description: The port number on the container instance to reserve for your container.
+ returned: when present
+ type: int
+ protocol:
+ description: The protocol used for the port mapping.
+ returned: when present
+ type: str
+ essential:
+ description: Whether this is an essential container or not.
+ returned: always
+ type: bool
+ entryPoint:
+ description: The entry point that is passed to the container.
+ returned: when present
+ type: str
+ command:
+ description: The command that is passed to the container.
+ returned: when present
+ type: str
+ environment:
+ description: The environment variables to pass to a container.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the environment variable.
+ returned: when present
+ type: str
+ value:
+ description: The value of the environment variable.
+ returned: when present
+ type: str
+ mountPoints:
+ description: The mount points for data volumes in your container.
+ returned: always
+ type: complex
+ contains:
+ sourceVolume:
+ description: The name of the volume to mount.
+ returned: when present
+ type: str
+ containerPath:
+ description: The path on the container to mount the host volume at.
+ returned: when present
+ type: str
+ readOnly:
+          description: If this value is true, the container has read-only access to the volume.
+            If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ volumesFrom:
+ description: Data volumes to mount from another container.
+ returned: always
+ type: complex
+ contains:
+ sourceContainer:
+ description: The name of another container within the same task definition to mount volumes from.
+ returned: when present
+ type: str
+ readOnly:
+          description: If this value is true, the container has read-only access to the volume.
+            If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ hostname:
+ description: The hostname to use for your container.
+ returned: when present
+ type: str
+ user:
+ description: The user name to use inside the container.
+ returned: when present
+ type: str
+ workingDirectory:
+ description: The working directory in which to run commands inside the container.
+ returned: when present
+ type: str
+ disableNetworking:
+ description: When this parameter is true, networking is disabled within the container.
+ returned: when present
+ type: bool
+ privileged:
+ description: When this parameter is true, the container is given elevated
+ privileges on the host container instance (similar to the root user).
+ returned: when present
+ type: bool
+ readonlyRootFilesystem:
+ description: When this parameter is true, the container is given read-only access to its root file system.
+ returned: when present
+ type: bool
+ dnsServers:
+ description: A list of DNS servers that are presented to the container.
+ returned: when present
+ type: str
+ dnsSearchDomains:
+ description: A list of DNS search domains that are presented to the container.
+ returned: when present
+ type: str
+ extraHosts:
+ description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
+ returned: when present
+ type: complex
+ contains:
+ hostname:
+ description: The hostname to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ ipAddress:
+ description: The IP address to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ dockerSecurityOptions:
+ description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
+ returned: when present
+ type: str
+ dockerLabels:
+ description: A key/value map of labels to add to the container.
+ returned: when present
+ type: str
+ ulimits:
+ description: A list of ulimits to set in the container.
+ returned: when present
+ type: complex
+ contains:
+ name:
+          description: The type of the ulimit.
+ returned: when present
+ type: str
+ softLimit:
+ description: The soft limit for the ulimit type.
+ returned: when present
+ type: int
+ hardLimit:
+ description: The hard limit for the ulimit type.
+ returned: when present
+ type: int
+ logConfiguration:
+ description: The log configuration specification for the container.
+ returned: when present
+ type: str
+ options:
+ description: The configuration options to send to the log driver.
+ returned: when present
+ type: str
+
+family:
+ description: The family of your task definition, used as the definition name
+ returned: always
+ type: str
+task_definition_arn:
+ description: ARN of the task definition
+ returned: always
+ type: str
+task_role_arn:
+ description: The ARN of the IAM role that containers in this task can assume
+ returned: when role is set
+ type: str
+network_mode:
+ description: Network mode for the containers
+ returned: always
+ type: str
+revision:
+ description: Revision number that was queried
+ returned: always
+ type: int
+volumes:
+ description: The list of volumes in a task
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the volume.
+ returned: when present
+ type: str
+ host:
+ description: The contents of the host parameter determine whether your data volume
+ persists on the host container instance and where it is stored.
+ returned: when present
+ type: bool
+ source_path:
+ description: The path on the host container instance that is presented to the container.
+ returned: when present
+ type: str
+status:
+ description: The status of the task definition
+ returned: always
+ type: str
+requires_attributes:
+ description: The container instance attributes required by your task
+ returned: when present
+ type: complex
+ contains:
+ name:
+ description: The name of the attribute.
+ returned: when present
+ type: str
+ value:
+ description: The value of the attribute.
+ returned: when present
+ type: str
+ targetType:
+ description: The type of the target with which to attach the attribute.
+ returned: when present
+ type: str
+ targetId:
+ description: The ID of the target.
+ returned: when present
+ type: str
+placement_constraints:
+ description: A list of placement constraint objects to use for tasks
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of constraint.
+ returned: when present
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ returned: when present
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ task_definition=dict(required=True, type='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ecs_taskdefinition_facts':
+ module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ ecs = module.client('ecs')
+
+ try:
+ ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
+ except botocore.exceptions.ClientError:
+ ecs_td = {}
+
+ module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs.py
new file mode 100644
index 00000000..56ec6980
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs.py
@@ -0,0 +1,752 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs
+version_added: 1.0.0
+short_description: Create and maintain EFS file systems
+description:
+  - This module allows you to create, search, and destroy Amazon EFS file systems.
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+ - "Artem Kazakov (@akazakov)"
+options:
+ encrypt:
+ description:
+      - If I(encrypt=true), creates an encrypted file system. This cannot be modified after the file system is created.
+ type: bool
+ default: false
+ kms_key_id:
+ description:
+      - The ID of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only
+          required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for
+          Amazon EFS is used. The key ID can be a Key ID, Key ID ARN, Key Alias, or Key Alias ARN.
+ type: str
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter
+ is not set then tags will not be modified.
+ type: bool
+ default: true
+ state:
+ description:
+      - Allows creating, searching, and destroying an Amazon EFS file system.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ name:
+ description:
+ - Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete.
+ type: str
+ id:
+ description:
+ - ID of Amazon EFS. Either name or ID required for delete.
+ type: str
+ performance_mode:
+ description:
+ - File system's performance mode to use. Only takes effect during creation.
+ default: 'general_purpose'
+ choices: ['general_purpose', 'max_io']
+ type: str
+ tags:
+ description:
+      - "Tags of the Amazon EFS file system. Should be defined as a dictionary.
+          In case of 'present' state with tags and an existing EFS (matched by 'name'), the tags of the EFS will be replaced with the provided data."
+ type: dict
+ targets:
+ description:
+      - "List of mount targets. It should be a list of dictionaries, each including the attributes below.
+          This data may be modified for an existing EFS using state 'present' and a new list of mount targets."
+ type: list
+ elements: dict
+ suboptions:
+ subnet_id:
+ required: true
+ description: The ID of the subnet to add the mount target in.
+ ip_address:
+ type: str
+ description: A valid IPv4 address within the address range of the specified subnet.
+ security_groups:
+ type: list
+ elements: str
+          description: List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as the subnet specified.
+ throughput_mode:
+ description:
+ - The throughput_mode for the file system to be created.
+ - Requires botocore >= 1.10.57
+ choices: ['bursting', 'provisioned']
+ type: str
+ provisioned_throughput_in_mibps:
+ description:
+ - If the throughput_mode is provisioned, select the amount of throughput to provisioned in Mibps.
+ - Requires botocore >= 1.10.57
+ type: float
+ wait:
+ description:
+      - "In case of 'present' state, wait for the EFS 'available' life cycle state (unless the current state is 'deleting' or 'deleted').
+         In case of 'absent' state, wait for the EFS 'deleted' life cycle state."
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
+ default: 0
+ type: int
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: EFS provisioning
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ tags:
+ Name: myTestNameTag
+ purpose: file-storage
+ targets:
+ - subnet_id: subnet-748c5d03
+ security_groups: [ "sg-1a2b3c4d" ]
+
+- name: Modifying EFS data
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myAnotherTestTag
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+- name: Deleting EFS
+ community.aws.efs:
+ state: absent
+ name: myTestEFS
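+
+# Either name or id can identify the file system for deletion; a minimal sketch:
+- name: Deleting EFS by ID
+  community.aws.efs:
+    state: absent
+    id: fs-1234abcd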
+'''
+
+RETURN = r'''
+creation_time:
+ description: timestamp of creation date
+ returned: always
+ type: str
+ sample: "2015-11-16 07:30:57-05:00"
+creation_token:
+ description: EFS creation token
+ returned: always
+ type: str
+ sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
+file_system_id:
+ description: ID of the file system
+ returned: always
+ type: str
+ sample: "fs-xxxxxxxx"
+life_cycle_state:
+ description: state of the EFS file system
+ returned: always
+ type: str
+ sample: "creating, available, deleting, deleted"
+mount_point:
+  description: URL of the file system with a leading dot, kept from the time when AWS EFS required a region suffix in the address
+ returned: always
+ type: str
+ sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
+filesystem_address:
+  description: URL of the file system, valid for use with mount
+ returned: always
+ type: str
+ sample: "fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
+mount_targets:
+ description: list of mount targets
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned: always
+ type: str
+ sample: "my-efs"
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned: always
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned: always
+ type: str
+ sample: "XXXXXXXXXXXX"
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned: always
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned: always
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned: always
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+from time import sleep
+from time import time as timestamp
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError as e:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (compare_aws_tags,
+ camel_dict_to_snake_dict,
+ ansible_dict_to_boto3_tag_list,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+
+def _index_by_key(key, items):
+ return dict((item[key], item) for item in items)
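+# e.g. _index_by_key('SubnetId', [{'SubnetId': 'subnet-1', 'IpAddress': '10.0.0.5'}])
+#      -> {'subnet-1': {'SubnetId': 'subnet-1', 'IpAddress': '10.0.0.5'}}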
+
+
+class EFSConnection(object):
+
+ DEFAULT_WAIT_TIMEOUT_SECONDS = 0
+
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module):
+ self.connection = module.client('efs')
+ region = module.region
+
+ self.module = module
+ self.region = region
+ self.wait = module.params.get('wait')
+ self.wait_timeout = module.params.get('wait_timeout')
+
+ def get_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ items = iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ )
+ for item in items:
+ item['Name'] = item['CreationToken']
+ item['CreationTime'] = str(item['CreationTime'])
+            # When MountPoint was introduced, a network-path suffix had to be added
+            # before the address could be used. AWS later updated EFS so that no
+            # suffix is needed; MountPoint is kept for backwards compatibility, and
+            # the new FilesystemAddress value is provided for direct use with other
+            # modules (e.g. mount). AWS documentation:
+            # https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if item['LifeCycleState'] == self.STATE_AVAILABLE:
+ item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+ item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ else:
+ item['Tags'] = {}
+ item['MountTargets'] = []
+ yield item
+
+ def get_tags(self, **kwargs):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ tags = self.connection.describe_tags(**kwargs)['Tags']
+ return tags
+
+ def get_mount_targets(self, **kwargs):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ **kwargs
+ )
+ for target in targets:
+ if target['LifeCycleState'] == self.STATE_AVAILABLE:
+ target['SecurityGroups'] = list(self.get_security_groups(
+ MountTargetId=target['MountTargetId']
+ ))
+ else:
+ target['SecurityGroups'] = []
+ yield target
+
+ def get_security_groups(self, **kwargs):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return iterate_all(
+ 'SecurityGroups',
+ self.connection.describe_mount_target_security_groups,
+ **kwargs
+ )
+
+ def get_file_system_id(self, name):
+ """
+ Returns ID of instance by instance name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name
+ ))
+ return info and info['FileSystemId'] or None
+
+ def get_file_system_state(self, name, file_system_id=None):
+ """
+ Returns state of filesystem by EFS id/name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ FileSystemId=file_system_id
+ ))
+ return info and info['LifeCycleState'] or self.STATE_DELETED
+
+ def get_mount_targets_in_state(self, file_system_id, states=None):
+ """
+ Returns states of mount targets of selected EFS with selected state(s) (optional)
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ FileSystemId=file_system_id
+ )
+
+ if states:
+ if not isinstance(states, list):
+ states = [states]
+ targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+
+ return list(targets)
+
+ def supports_provisioned_mode(self):
+ """
+        Check whether the installed boto3/botocore supports the provisioned throughput mode feature
+ """
+ return hasattr(self.connection, 'update_file_system')
+
+ def get_throughput_mode(self, **kwargs):
+ """
+ Returns throughput mode for selected EFS instance
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ ))
+
+ return info and info['ThroughputMode'] or None
+
+ def get_provisioned_throughput_in_mibps(self, **kwargs):
+ """
+        Returns provisioned throughput (in Mibps) for the selected EFS instance
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ ))
+        return info.get('ProvisionedThroughputInMibps', None) if info else None
+
+ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Creates new filesystem with selected name
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ params = {}
+ params['CreationToken'] = name
+ params['PerformanceMode'] = performance_mode
+ if encrypt:
+ params['Encrypted'] = encrypt
+ if kms_key_id is not None:
+ params['KmsKeyId'] = kms_key_id
+ if throughput_mode:
+ if self.supports_provisioned_mode():
+ params['ThroughputMode'] = throughput_mode
+ else:
+ self.module.fail_json(msg="throughput_mode parameter requires botocore >= 1.10.57")
+ if provisioned_throughput_in_mibps:
+ if self.supports_provisioned_mode():
+ params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+ else:
+ self.module.fail_json(msg="provisioned_throughput_in_mibps parameter requires botocore >= 1.10.57")
+
+ if state in [self.STATE_DELETING, self.STATE_DELETED]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED
+ )
+ try:
+ self.connection.create_file_system(**params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to create file system.")
+
+ # we always wait for the state to be available when creating.
+ # if we try to take any actions on the file system before it's available
+ # we'll throw errors
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+
+ return changed
+
+ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Update filesystem with new throughput settings
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
+ fs_id = self.get_file_system_id(name)
+ current_mode = self.get_throughput_mode(FileSystemId=fs_id)
+ current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id)
+ params = dict()
+ if throughput_mode and throughput_mode != current_mode:
+ params['ThroughputMode'] = throughput_mode
+ if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput:
+ params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+ if len(params) > 0:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+ try:
+ self.connection.update_file_system(FileSystemId=fs_id, **params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to update file system.")
+ return changed
+
+ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Change attributes (mount targets and tags) of filesystem by name
+ """
+ result = False
+ fs_id = self.get_file_system_id(name)
+
+ if tags is not None:
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
+
+ if tags_to_delete:
+ try:
+ self.connection.delete_tags(
+ FileSystemId=fs_id,
+ TagKeys=tags_to_delete
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to delete tags.")
+
+ result = True
+
+ if tags_need_modify:
+ try:
+ self.connection.create_tags(
+ FileSystemId=fs_id,
+ Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to create tags.")
+
+ result = True
+
+ if targets is not None:
+ incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
+ targets = _index_by_key('SubnetId', targets)
+
+ targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
+ targets, True)
+
+ # To modify mount target it should be deleted and created again
+ changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
+ current_targets[sid], targets[sid])]
+ targets_to_delete = list(targets_to_delete) + changed
+ targets_to_create = list(targets_to_create) + changed
+
+ if targets_to_delete:
+ for sid in targets_to_delete:
+ self.connection.delete_mount_target(
+ MountTargetId=current_targets[sid]['MountTargetId']
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ result = True
+
+ if targets_to_create:
+ for sid in targets_to_create:
+ self.connection.create_mount_target(
+ FileSystemId=fs_id,
+ **targets[sid]
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0,
+ self.wait_timeout
+ )
+ result = True
+
+ # If no security groups were passed into the module, then do not change it.
+ security_groups_to_update = [sid for sid in intersection if
+ 'SecurityGroups' in targets[sid] and
+ current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
+
+ if security_groups_to_update:
+ for sid in security_groups_to_update:
+ self.connection.modify_mount_target_security_groups(
+ MountTargetId=current_targets[sid]['MountTargetId'],
+ SecurityGroups=targets[sid].get('SecurityGroups', None)
+ )
+ result = True
+
+ return result
+
+ def delete_file_system(self, name, file_system_id=None):
+ """
+ Removes EFS instance by id/name
+ """
+ result = False
+ state = self.get_file_system_state(name, file_system_id)
+ if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE
+ )
+ if not file_system_id:
+ file_system_id = self.get_file_system_id(name)
+ self.delete_mount_targets(file_system_id)
+ self.connection.delete_file_system(FileSystemId=file_system_id)
+ result = True
+
+ if self.wait:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED,
+ self.wait_timeout
+ )
+
+ return result
+
+ def delete_mount_targets(self, file_system_id):
+ """
+ Removes mount targets by EFS id
+ """
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
+ 0
+ )
+
+ targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
+ for target in targets:
+ self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
+ 0
+ )
+
+ return len(targets) > 0
+
+
+def iterate_all(attr, map_method, **kwargs):
+ """
+    Creates an iterator over a paginated result set
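+
+    Follows NextMarker pagination and backs off exponentially on
+    ThrottlingException. Illustrative usage (a sketch; 'connection' is a
+    boto3 EFS client):
+
+        for fs in iterate_all('FileSystems', connection.describe_file_systems):
+            process(fs)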
+ """
+ args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+ wait = 1
+ while True:
+ try:
+ data = map_method(**args)
+ for elm in data[attr]:
+ yield elm
+ if 'NextMarker' in data:
+                args['Marker'] = data['NextMarker']
+ continue
+ break
+ except ClientError as e:
+ if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+ sleep(wait)
+ wait = wait * 2
+ continue
+ else:
+ raise
+
+
+def targets_equal(keys, a, b):
+ """
+    Compares two mount targets by the specified attributes
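+
+    Keys missing from b are ignored. Illustrative example:
+
+    >>> targets_equal(['SubnetId', 'IpAddress'],
+    ...               {'SubnetId': 'subnet-1', 'IpAddress': '10.0.0.5'},
+    ...               {'SubnetId': 'subnet-1'})
+    True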
+ """
+ for key in keys:
+ if key in b and a[key] != b[key]:
+ return False
+
+ return True
+
+
+def dict_diff(dict1, dict2, by_key=False):
+ """
+    Helper method to calculate the difference of two dictionaries
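+
+    Returns a tuple of (items only in dict2, common items, items only in dict1).
+
+    Illustrative example:
+
+    >>> dict_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}, by_key=True)
+    ({'c'}, {'b'}, {'a'})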
+ """
+ keys1 = set(dict1.keys() if by_key else dict1.items())
+ keys2 = set(dict2.keys() if by_key else dict2.items())
+
+ intersection = keys1 & keys2
+
+ return keys2 ^ intersection, intersection, keys1 ^ intersection
+
+
+def first_or_default(items, default=None):
+ """
+    Helper method to fetch the first element of an iterable (if any)
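+
+    Illustrative example:
+
+    >>> first_or_default(iter([1, 2, 3]))
+    1
+    >>> first_or_default([], default='empty')
+    'empty'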
+ """
+ for item in items:
+ return item
+ return default
+
+
+def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
+ """
+ Helper method to wait for desired value returned by callback method
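+
+    Polls every 5 seconds; timeout=0 means wait indefinitely. Illustrative
+    usage (a sketch):
+
+        wait_for(lambda: connection.get_file_system_state(name),
+                 EFSConnection.STATE_AVAILABLE, timeout=300)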
+ """
+ wait_start = timestamp()
+ while True:
+ if callback() != value:
+ if timeout != 0 and (timestamp() - wait_start > timeout):
+ raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+ else:
+ sleep(5)
+ continue
+ break
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ encrypt=dict(required=False, type="bool", default=False),
+ state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
+ kms_key_id=dict(required=False, type='str', default=None),
+ purge_tags=dict(default=True, type='bool'),
+ id=dict(required=False, type='str', default=None),
+ name=dict(required=False, type='str', default=None),
+ tags=dict(required=False, type="dict", default={}),
+ targets=dict(required=False, type="list", default=[], elements='dict'),
+ performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
+ throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
+ provisioned_throughput_in_mibps=dict(required=False, type='float'),
+ wait=dict(required=False, type="bool", default=False),
+ wait_timeout=dict(required=False, type="int", default=0)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ connection = EFSConnection(module)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ target_translations = {
+ 'ip_address': 'IpAddress',
+ 'security_groups': 'SecurityGroups',
+ 'subnet_id': 'SubnetId'
+ }
+ targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+ performance_mode_translations = {
+ 'general_purpose': 'generalPurpose',
+ 'max_io': 'maxIO'
+ }
+ encrypt = module.params.get('encrypt')
+ kms_key_id = module.params.get('kms_key_id')
+ performance_mode = performance_mode_translations[module.params.get('performance_mode')]
+ purge_tags = module.params.get('purge_tags')
+ throughput_mode = module.params.get('throughput_mode')
+ provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
+ state = str(module.params.get('state')).lower()
+ changed = False
+
+ if state == 'present':
+ if not name:
+ module.fail_json(msg='Name parameter is required for create')
+
+ changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
+ if connection.supports_provisioned_mode():
+ changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
+ changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
+ throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
+ result = first_or_default(connection.get_file_systems(CreationToken=name))
+
+ elif state == 'absent':
+ if not name and not fs_id:
+ module.fail_json(msg='Either name or id parameter is required for delete')
+
+ changed = connection.delete_file_system(name, fs_id)
+ result = None
+ if result:
+ result = camel_dict_to_snake_dict(result)
+ module.exit_json(changed=changed, efs=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_facts.py
new file mode 100644
index 00000000..62fd5837
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_facts.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs_info
+version_added: 1.0.0
+short_description: Get information about Amazon EFS file systems
+description:
+ - This module can be used to search Amazon EFS file systems.
+ - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)!
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+options:
+ name:
+ description:
+ - Creation Token of Amazon EFS file system.
+ aliases: [ creation_token ]
+ type: str
+ id:
+ description:
+ - ID of Amazon EFS.
+ type: str
+ tags:
+ description:
+      - Tags of the Amazon EFS file system, defined as a dictionary.
+ type: dict
+ targets:
+ description:
+ - List of targets on which to filter the returned results.
+ - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Find all existing efs
+ community.aws.efs_info:
+ register: result
+
+- name: Find efs using id
+ community.aws.efs_info:
+ id: fs-1234abcd
+ register: result
+
+- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
+ community.aws.efs_info:
+ tags:
+ Name: myTestNameTag
+ targets:
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result['efs'] }}"
+'''
+
+RETURN = r'''
+creation_time:
+ description: timestamp of creation date
+ returned: always
+ type: str
+ sample: "2015-11-16 07:30:57-05:00"
+creation_token:
+ description: EFS creation token
+ returned: always
+ type: str
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned: always
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+  description: URL of the file system with a leading dot, retained from the time when AWS EFS required a network suffix to be added to the address
+ returned: always
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+filesystem_address:
+ description: url of file system
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned: always
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned: always
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned: always
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned: always
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned: always
+ type: str
+ sample: "generalPurpose"
+throughput_mode:
+ description: mode of throughput for the file system
+ returned: when botocore >= 1.10.57
+ type: str
+ sample: "bursting"
+provisioned_throughput_in_mibps:
+ description: throughput provisioned in Mibps
+ returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
+ type: float
+ sample: 15.0
+tags:
+ description: tags on the efs instance
+ returned: always
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+
+from collections import defaultdict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class EFSConnection(object):
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module):
+ try:
+ self.connection = module.client('efs')
+ self.module = module
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
+
+ self.region = module.region
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def list_file_systems(self, **kwargs):
+ """
+        Returns a list of file systems, including all attributes of each file system
+ """
+ paginator = self.connection.get_paginator('describe_file_systems')
+ return paginator.paginate(**kwargs).build_full_result()['FileSystems']
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_tags(self, file_system_id):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_tags')
+ return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_mount_targets(self, file_system_id):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_mount_targets')
+ return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
+
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_security_groups(self, mount_target_id):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
+
+ def get_mount_targets_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ mount_targets = self.get_mount_targets(item['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
+ for mt in mount_targets:
+ item['mount_targets'].append(camel_dict_to_snake_dict(mt))
+ return file_systems
+
+ def get_security_groups_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ for target in item['mount_targets']:
+ if target['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ target['security_groups'] = self.get_security_groups(target['mount_target_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
+ else:
+ target['security_groups'] = []
+ else:
+ item['tags'] = {}
+ item['mount_targets'] = []
+ return file_systems
+
+ def get_file_systems(self, file_system_id=None, creation_token=None):
+ kwargs = dict()
+ if file_system_id:
+ kwargs['FileSystemId'] = file_system_id
+ if creation_token:
+ kwargs['CreationToken'] = creation_token
+ try:
+ file_systems = self.list_file_systems(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
+
+ results = list()
+ for item in file_systems:
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
+ AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
+ And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
+ AWS documentation is available here:
+ U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ result = camel_dict_to_snake_dict(item)
+ result['tags'] = {}
+ result['mount_targets'] = []
+ # Set tags *after* doing camel to snake
+ if result['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ result['tags'] = self.get_tags(result['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
+ results.append(result)
+ return results
+
+
+def prefix_to_attr(attr_id):
+ """
+ Helper method to convert ID prefix to mount target attribute
+ """
+ attr_by_prefix = {
+ 'fsmt-': 'mount_target_id',
+ 'subnet-': 'subnet_id',
+ 'eni-': 'network_interface_id',
+ 'sg-': 'security_groups'
+ }
+ return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
+ if str(attr_id).startswith(prefix)], 'ip_address')
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def has_tags(available, required):
+ """
+    Helper method to determine whether all requested tags are already present
+ """
+ for key, value in required.items():
+ if key not in available or value != available[key]:
+ return False
+ return True
+
+
+def has_targets(available, required):
+ """
+    Helper method to determine whether the requested mount targets already exist
+ """
+ grouped = group_list_of_dict(available)
+ for (value, field) in required:
+ if field not in grouped or value not in grouped[field]:
+ return False
+ return True
+
+
+def group_list_of_dict(array):
+ """
+    Helper method to merge a list of dicts into one dict mapping each key to all of its values
+ """
+ result = defaultdict(list)
+ for item in array:
+ for key, value in item.items():
+ result[key] += value if isinstance(value, list) else [value]
+ return result
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ id=dict(),
+ name=dict(aliases=['creation_token']),
+ tags=dict(type="dict", default={}),
+ targets=dict(type="list", default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ is_old_facts = module._name == 'efs_facts'
+ if is_old_facts:
+ module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ connection = EFSConnection(module)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ targets = module.params.get('targets')
+
+ file_systems_info = connection.get_file_systems(fs_id, name)
+
+ if tags:
+ file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
+
+ file_systems_info = connection.get_mount_targets_data(file_systems_info)
+ file_systems_info = connection.get_security_groups_data(file_systems_info)
+
+ if targets:
+ targets = [(item, prefix_to_attr(item)) for item in targets]
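+        # e.g. 'subnet-1a2b3c4d' becomes ('subnet-1a2b3c4d', 'subnet_id')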
+ file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
+
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
+ else:
+ module.exit_json(changed=False, efs=file_systems_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_info.py
new file mode 100644
index 00000000..62fd5837
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/efs_info.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs_info
+version_added: 1.0.0
+short_description: Get information about Amazon EFS file systems
+description:
+ - This module can be used to search Amazon EFS file systems.
+ - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)!
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+options:
+ name:
+ description:
+ - Creation Token of Amazon EFS file system.
+ aliases: [ creation_token ]
+ type: str
+ id:
+ description:
+ - ID of Amazon EFS.
+ type: str
+ tags:
+ description:
+      - Tags of the Amazon EFS file system, defined as a dictionary.
+ type: dict
+ targets:
+ description:
+ - List of targets on which to filter the returned results.
+ - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Find all existing efs
+ community.aws.efs_info:
+ register: result
+
+- name: Find efs using id
+ community.aws.efs_info:
+ id: fs-1234abcd
+ register: result
+
+- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
+ community.aws.efs_info:
+ tags:
+ Name: myTestNameTag
+ targets:
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result['efs'] }}"
+'''
+
+RETURN = r'''
+creation_time:
+ description: timestamp of creation date
+ returned: always
+ type: str
+ sample: "2015-11-16 07:30:57-05:00"
+creation_token:
+ description: EFS creation token
+ returned: always
+ type: str
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned: always
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+  description: URL of the file system with a leading dot, retained from the time when AWS EFS required a network suffix to be added to the address
+ returned: always
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+filesystem_address:
+ description: url of file system
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned: always
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned: always
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned: always
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned: always
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned: always
+ type: str
+ sample: "generalPurpose"
+throughput_mode:
+ description: mode of throughput for the file system
+ returned: when botocore >= 1.10.57
+ type: str
+ sample: "bursting"
+provisioned_throughput_in_mibps:
+ description: throughput provisioned in Mibps
+ returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
+ type: float
+ sample: 15.0
+tags:
+ description: tags on the efs instance
+ returned: always
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+
+from collections import defaultdict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class EFSConnection(object):
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module):
+ try:
+ self.connection = module.client('efs')
+ self.module = module
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
+
+ self.region = module.region
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def list_file_systems(self, **kwargs):
+ """
+        Returns a list of file systems, including all attributes of each file system
+ """
+ paginator = self.connection.get_paginator('describe_file_systems')
+ return paginator.paginate(**kwargs).build_full_result()['FileSystems']
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_tags(self, file_system_id):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_tags')
+ return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_mount_targets(self, file_system_id):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_mount_targets')
+ return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
+
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_security_groups(self, mount_target_id):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
+
+ def get_mount_targets_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ mount_targets = self.get_mount_targets(item['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
+ for mt in mount_targets:
+ item['mount_targets'].append(camel_dict_to_snake_dict(mt))
+ return file_systems
+
+ def get_security_groups_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ for target in item['mount_targets']:
+ if target['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ target['security_groups'] = self.get_security_groups(target['mount_target_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
+ else:
+ target['security_groups'] = []
+ else:
+ item['tags'] = {}
+ item['mount_targets'] = []
+ return file_systems
+
+ def get_file_systems(self, file_system_id=None, creation_token=None):
+ kwargs = dict()
+ if file_system_id:
+ kwargs['FileSystemId'] = file_system_id
+ if creation_token:
+ kwargs['CreationToken'] = creation_token
+ try:
+ file_systems = self.list_file_systems(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
+
+ results = list()
+ for item in file_systems:
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
+ AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
+ And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
+ AWS documentation is available here:
+ U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ result = camel_dict_to_snake_dict(item)
+ result['tags'] = {}
+ result['mount_targets'] = []
+ # Set tags *after* doing camel to snake
+ if result['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ result['tags'] = self.get_tags(result['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
+ results.append(result)
+ return results
+
+
+def prefix_to_attr(attr_id):
+ """
+ Helper method to convert ID prefix to mount target attribute
+ """
+ attr_by_prefix = {
+ 'fsmt-': 'mount_target_id',
+ 'subnet-': 'subnet_id',
+ 'eni-': 'network_interface_id',
+ 'sg-': 'security_groups'
+ }
+ return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
+ if str(attr_id).startswith(prefix)], 'ip_address')
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def has_tags(available, required):
+ """
+    Helper method to determine whether all requested tags are already present
+ """
+ for key, value in required.items():
+ if key not in available or value != available[key]:
+ return False
+ return True
+
+
+def has_targets(available, required):
+ """
+    Helper method to determine whether the requested mount targets already exist
+ """
+ grouped = group_list_of_dict(available)
+ for (value, field) in required:
+ if field not in grouped or value not in grouped[field]:
+ return False
+ return True
+
+
+def group_list_of_dict(array):
+ """
+    Helper method to merge a list of dicts into one dict mapping each key to all of its values
+ """
+ result = defaultdict(list)
+ for item in array:
+ for key, value in item.items():
+ result[key] += value if isinstance(value, list) else [value]
+ return result
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ id=dict(),
+ name=dict(aliases=['creation_token']),
+ tags=dict(type="dict", default={}),
+ targets=dict(type="list", default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ is_old_facts = module._name == 'efs_facts'
+ if is_old_facts:
+ module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')
+
+ connection = EFSConnection(module)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ targets = module.params.get('targets')
+
+ file_systems_info = connection.get_file_systems(fs_id, name)
+
+ if tags:
+ file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
+
+ file_systems_info = connection.get_mount_targets_data(file_systems_info)
+ file_systems_info = connection.get_security_groups_data(file_systems_info)
+
+ if targets:
+ targets = [(item, prefix_to_attr(item)) for item in targets]
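+        # e.g. 'subnet-1a2b3c4d' becomes ('subnet-1a2b3c4d', 'subnet_id')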
+ file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
+
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
+ else:
+ module.exit_json(changed=False, efs=file_systems_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache.py
new file mode 100644
index 00000000..d6a649ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache.py
@@ -0,0 +1,546 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elasticache
+version_added: 1.0.0
+short_description: Manage cache clusters in Amazon ElastiCache
+description:
+ - Manage cache clusters in Amazon ElastiCache.
+ - Returns information about the specified cache cluster.
+requirements: [ boto3 ]
+author: "Jim Dalton (@jsdalton)"
+options:
+ state:
+ description:
+ - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed.
+ - C(rebooted) will reboot the cluster, resulting in a momentary outage.
+ choices: ['present', 'absent', 'rebooted']
+ required: true
+ type: str
+ name:
+ description:
+ - The cache cluster identifier.
+ required: true
+ type: str
+ engine:
+ description:
+ - Name of the cache engine to be used.
+ - Supported values are C(redis) and C(memcached).
+ default: memcached
+ type: str
+ cache_engine_version:
+ description:
+ - The version number of the cache engine.
+ type: str
+ node_type:
+ description:
+ - The compute and memory capacity of the nodes in the cache cluster.
+ default: cache.t2.small
+ type: str
+ num_nodes:
+ description:
+ - The initial number of cache nodes that the cache cluster will have.
+ - Required when I(state=present).
+ type: int
+ default: 1
+ cache_port:
+ description:
+ - The port number on which each of the cache nodes will accept
+ connections.
+ type: int
+ cache_parameter_group:
+ description:
+ - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
+ for the specified engine will be used.
+ aliases: [ 'parameter_group' ]
+ type: str
+ cache_subnet_group:
+ description:
+ - The subnet group name to associate with. Only use if inside a VPC.
+ - Required if inside a VPC.
+ type: str
+ security_group_ids:
+ description:
+ - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC.
+ type: list
+ elements: str
+ cache_security_groups:
+ description:
+ - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a VPC.
+ type: list
+ elements: str
+ zone:
+ description:
+ - The EC2 Availability Zone in which the cache cluster will be created.
+ type: str
+ wait:
+ description:
+ - Wait for cache cluster result before returning.
+ type: bool
+ default: true
+ hard_modify:
+ description:
+ - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state.
+ - Defaults to C(false).
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r"""
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: Basic example
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: present
+ engine: memcached
+ cache_engine_version: 1.4.14
+ node_type: cache.m1.small
+ num_nodes: 1
+ cache_port: 11211
+ cache_security_groups:
+ - default
+ zone: us-east-1d
+
+
+- name: Ensure cache cluster is gone
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: absent
+
+- name: Reboot cache cluster
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: rebooted
+
+"""
+from time import sleep
+from traceback import format_exc
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class ElastiCacheManager(object):
+
+ """Handles elasticache creation and destruction"""
+
+ EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
+
+ def __init__(self, module, name, engine, cache_engine_version, node_type,
+ num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
+ cache_security_groups, security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs):
+ self.module = module
+ self.name = name
+ self.engine = engine.lower()
+ self.cache_engine_version = cache_engine_version
+ self.node_type = node_type
+ self.num_nodes = num_nodes
+ self.cache_port = cache_port
+ self.cache_parameter_group = cache_parameter_group
+ self.cache_subnet_group = cache_subnet_group
+ self.cache_security_groups = cache_security_groups
+ self.security_group_ids = security_group_ids
+ self.zone = zone
+ self.wait = wait
+ self.hard_modify = hard_modify
+
+ self.region = region
+ self.aws_connect_kwargs = aws_connect_kwargs
+
+ self.changed = False
+ self.data = None
+ self.status = 'gone'
+ self.conn = self._get_elasticache_connection()
+ self._refresh_data()
+
+ def ensure_present(self):
+ """Ensure cache cluster exists or create it if not"""
+ if self.exists():
+ self.sync()
+ else:
+ self.create()
+
+ def ensure_absent(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.delete()
+
+ def ensure_rebooted(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.reboot()
+
+ def exists(self):
+ """Check if cache cluster exists"""
+ return self.status in self.EXIST_STATUSES
+
+ def create(self):
+ """Create an ElastiCache cluster"""
+ if self.status == 'available':
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ else:
+ msg = "'%s' is currently deleting. Cannot create."
+ self.module.fail_json(msg=msg % self.name)
+
+ kwargs = dict(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeType=self.node_type,
+ Engine=self.engine,
+ EngineVersion=self.cache_engine_version,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ SecurityGroupIds=self.security_group_ids,
+ CacheParameterGroupName=self.cache_parameter_group,
+ CacheSubnetGroupName=self.cache_subnet_group)
+ if self.cache_port is not None:
+ kwargs['Port'] = self.cache_port
+ if self.zone is not None:
+ kwargs['PreferredAvailabilityZone'] = self.zone
+
+ try:
+ self.conn.create_cache_cluster(**kwargs)
+
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to create cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+ return True
+
+ def delete(self):
+ """Destroy an ElastiCache cluster"""
+ if self.status == 'gone':
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot delete."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ try:
+ response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to delete cache cluster")
+
+ cache_cluster_data = response['CacheCluster']
+ self._refresh_data(cache_cluster_data)
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('gone')
+
+ def sync(self):
+ """Sync settings to cluster if required"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot sync."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ # Cluster can only be synced if available. If we can't wait
+ # for this, then just be done.
+ return
+
+ if self._requires_destroy_and_create():
+ if not self.hard_modify:
+ msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+ if not self.wait:
+ msg = "'%s' requires destructive modification. 'wait' must be set to true."
+ self.module.fail_json(msg=msg % self.name)
+ self.delete()
+ self.create()
+ return
+
+ if self._requires_modification():
+ self.modify()
+
+ def modify(self):
+ """Modify the cache cluster. Note it's only possible to modify a few select options."""
+ nodes_to_remove = self._get_nodes_to_remove()
+ try:
+ self.conn.modify_cache_cluster(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeIdsToRemove=nodes_to_remove,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ CacheParameterGroupName=self.cache_parameter_group,
+ SecurityGroupIds=self.security_group_ids,
+ ApplyImmediately=True,
+ EngineVersion=self.cache_engine_version)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to modify cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def reboot(self):
+ """Reboot the cache cluster"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+ if self.status == 'rebooting':
+ return
+ if self.status in ['creating', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ # Collect ALL nodes for reboot
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ try:
+ self.conn.reboot_cache_cluster(CacheClusterId=self.name,
+ CacheNodeIdsToReboot=cache_node_ids)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to reboot cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def get_info(self):
+ """Return basic info about the cache cluster"""
+ info = {
+ 'name': self.name,
+ 'status': self.status
+ }
+ if self.data:
+ info['data'] = self.data
+ return info
+
+ def _wait_for_status(self, awaited_status):
+ """Wait for status to change from present status to awaited_status"""
+ status_map = {
+ 'creating': 'available',
+ 'rebooting': 'available',
+ 'modifying': 'available',
+ 'deleting': 'gone'
+ }
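+        # e.g. a cluster currently 'creating' can only be waited into
+        # 'available'; asking to wait for 'gone' from 'creating' fails fast
+        # in the transition check below.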
+ if self.status == awaited_status:
+ # No need to wait, we're already done
+ return
+ if status_map[self.status] != awaited_status:
+ msg = "Invalid awaited status. '%s' cannot transition to '%s'"
+ self.module.fail_json(msg=msg % (self.status, awaited_status))
+
+ if awaited_status not in set(status_map.values()):
+ msg = "'%s' is not a valid awaited status."
+ self.module.fail_json(msg=msg % awaited_status)
+
+ while True:
+ sleep(1)
+ self._refresh_data()
+ if self.status == awaited_status:
+ break
+
+ def _requires_modification(self):
+ """Check if cluster requires (nondestructive) modification"""
+ # Check modifiable data attributes
+ modifiable_data = {
+ 'NumCacheNodes': self.num_nodes,
+ 'EngineVersion': self.cache_engine_version
+ }
+ for key, value in modifiable_data.items():
+ if value is not None and value and self.data[key] != value:
+ return True
+
+ # Check cache security groups
+ cache_security_groups = []
+ for sg in self.data['CacheSecurityGroups']:
+ cache_security_groups.append(sg['CacheSecurityGroupName'])
+ if set(cache_security_groups) != set(self.cache_security_groups):
+ return True
+
+ # check vpc security groups
+ if self.security_group_ids:
+ vpc_security_groups = []
+ security_groups = self.data['SecurityGroups'] or []
+ for sg in security_groups:
+ vpc_security_groups.append(sg['SecurityGroupId'])
+ if set(vpc_security_groups) != set(self.security_group_ids):
+ return True
+
+ return False
+
+ def _requires_destroy_and_create(self):
+ """
+ Check whether a destroy and create is required to synchronize cluster.
+ """
+ unmodifiable_data = {
+ 'node_type': self.data['CacheNodeType'],
+ 'engine': self.data['Engine'],
+ 'cache_port': self._get_port()
+ }
+ # Only check for modifications if zone is specified
+ if self.zone is not None:
+ unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
+ for key, value in unmodifiable_data.items():
+ if getattr(self, key) is not None and getattr(self, key) != value:
+ return True
+ return False
+
+ def _get_elasticache_connection(self):
+ """Get an elasticache connection"""
+ try:
+ return self.module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def _get_port(self):
+ """Get the port. Where this information is retrieved from is engine dependent."""
+ if self.data['Engine'] == 'memcached':
+ return self.data['ConfigurationEndpoint']['Port']
+ elif self.data['Engine'] == 'redis':
+ # Redis only supports a single node (presently) so just use
+ # the first and only
+ return self.data['CacheNodes'][0]['Endpoint']['Port']
+
+ def _refresh_data(self, cache_cluster_data=None):
+ """Refresh data about this cache cluster"""
+
+ if cache_cluster_data is None:
+ try:
+ response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'CacheClusterNotFound':
+ self.data = None
+ self.status = 'gone'
+ return
+ else:
+ self.module.fail_json_aws(e, msg="Failed to describe cache clusters")
+ cache_cluster_data = response['CacheClusters'][0]
+ self.data = cache_cluster_data
+ self.status = self.data['CacheClusterStatus']
+
+        # The ElastiCache API reports a rebooting cluster's status as
+        # 'rebooting cache cluster nodes' instead of the documented
+        # 'rebooting'. Normalize it here so status checks stay sane.
+ if self.status == 'rebooting cache cluster nodes':
+ self.status = 'rebooting'
+
+ def _get_nodes_to_remove(self):
+ """If there are nodes to remove, it figures out which need to be removed"""
+ num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
+ if num_nodes_to_remove <= 0:
+ return []
+
+ if not self.hard_modify:
+ msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ return cache_node_ids[-num_nodes_to_remove:]
+
+
+def main():
+ """ elasticache ansible module """
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'rebooted']),
+ name=dict(required=True),
+ engine=dict(default='memcached'),
+ cache_engine_version=dict(default=""),
+ node_type=dict(default='cache.t2.small'),
+ num_nodes=dict(default=1, type='int'),
+ # alias for compat with the original PR 1950
+ cache_parameter_group=dict(default="", aliases=['parameter_group']),
+ cache_port=dict(type='int'),
+ cache_subnet_group=dict(default=""),
+ cache_security_groups=dict(default=[], type='list', elements='str'),
+ security_group_ids=dict(default=[], type='list', elements='str'),
+ zone=dict(),
+ wait=dict(default=True, type='bool'),
+ hard_modify=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ name = module.params['name']
+ state = module.params['state']
+ engine = module.params['engine']
+ cache_engine_version = module.params['cache_engine_version']
+ node_type = module.params['node_type']
+ num_nodes = module.params['num_nodes']
+ cache_port = module.params['cache_port']
+ cache_subnet_group = module.params['cache_subnet_group']
+ cache_security_groups = module.params['cache_security_groups']
+ security_group_ids = module.params['security_group_ids']
+ zone = module.params['zone']
+ wait = module.params['wait']
+ hard_modify = module.params['hard_modify']
+ cache_parameter_group = module.params['cache_parameter_group']
+
+ if cache_subnet_group and cache_security_groups:
+ module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
+
+ if state == 'present' and not num_nodes:
+ module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
+
+ elasticache_manager = ElastiCacheManager(module, name, engine,
+ cache_engine_version, node_type,
+ num_nodes, cache_port,
+ cache_parameter_group,
+ cache_subnet_group,
+ cache_security_groups,
+ security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs)
+
+ if state == 'present':
+ elasticache_manager.ensure_present()
+ elif state == 'absent':
+ elasticache_manager.ensure_absent()
+ elif state == 'rebooted':
+ elasticache_manager.ensure_rebooted()
+
+ facts_result = dict(changed=elasticache_manager.changed,
+ elasticache=elasticache_manager.get_info())
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_facts.py
new file mode 100644
index 00000000..5b22c5ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_facts.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: elasticache_info
+short_description: Retrieve information for AWS ElastiCache clusters
+version_added: 1.0.0
+description:
+ - Retrieve information from AWS ElastiCache clusters
+ - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change.
+options:
+ name:
+ description:
+ - The name of an ElastiCache cluster.
+ type: str
+
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all ElastiCache information
+ community.aws.elasticache_info:
+
+- name: obtain all information for a single ElastiCache cluster
+ community.aws.elasticache_info:
+ name: test_elasticache
+'''
+
+RETURN = '''
+elasticache_clusters:
+ description: List of ElastiCache clusters
+ returned: always
+ type: complex
+ contains:
+ auto_minor_version_upgrade:
+ description: Whether to automatically upgrade to minor versions
+ returned: always
+ type: bool
+ sample: true
+ cache_cluster_create_time:
+ description: Date and time cluster was created
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_cluster_id:
+ description: ID of the cache cluster
+ returned: always
+ type: str
+ sample: abcd-1234-001
+ cache_cluster_status:
+ description: Status of ElastiCache cluster
+ returned: always
+ type: str
+ sample: available
+ cache_node_type:
+ description: Instance type of ElastiCache nodes
+ returned: always
+ type: str
+ sample: cache.t2.micro
+ cache_nodes:
+ description: List of ElastiCache nodes in the cluster
+ returned: always
+ type: complex
+ contains:
+ cache_node_create_time:
+ description: Date and time node was created
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_node_id:
+ description: ID of the cache node
+ returned: always
+ type: str
+ sample: '0001'
+ cache_node_status:
+ description: Status of the cache node
+ returned: always
+ type: str
+ sample: available
+ customer_availability_zone:
+ description: Availability Zone in which the cache node was created
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ endpoint:
+ description: Connection details for the cache node
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: URL of the cache node endpoint
+ returned: always
+ type: str
+ sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
+ port:
+ description: Port of the cache node endpoint
+ returned: always
+ type: int
+ sample: 6379
+ parameter_group_status:
+ description: Status of the Cache Parameter Group
+ returned: always
+ type: str
+ sample: in-sync
+ cache_parameter_group:
+ description: Contents of the Cache Parameter Group
+ returned: always
+ type: complex
+ contains:
+ cache_node_ids_to_reboot:
+ description: Cache nodes which need to be rebooted for parameter changes to be applied
+ returned: always
+ type: list
+ sample: []
+ cache_parameter_group_name:
+ description: Name of the cache parameter group
+ returned: always
+ type: str
+ sample: default.redis3.2
+ parameter_apply_status:
+ description: Status of parameter updates
+ returned: always
+ type: str
+ sample: in-sync
+ cache_security_groups:
+ description: Security Groups used by the cache
+ returned: always
+ type: list
+ sample:
+ - 'sg-abcd1234'
+ cache_subnet_group_name:
+ description: ElastiCache Subnet Group used by the cache
+ returned: always
+ type: str
+ sample: abcd-subnet-group
+ client_download_landing_page:
+ description: URL of client download web page
+ returned: always
+ type: str
+ sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
+ engine:
+ description: Engine used by ElastiCache
+ returned: always
+ type: str
+ sample: redis
+ engine_version:
+ description: Version of ElastiCache engine
+ returned: always
+ type: str
+ sample: 3.2.4
+ notification_configuration:
+ description: Configuration of notifications
+ returned: if notifications are enabled
+ type: complex
+ contains:
+ topic_arn:
+ description: ARN of notification destination topic
+ returned: if notifications are enabled
+ type: str
+ sample: arn:aws:sns:*:123456789012:my_topic
+ topic_name:
+ description: Name of notification destination topic
+ returned: if notifications are enabled
+ type: str
+ sample: MyTopic
+ num_cache_nodes:
+ description: Number of Cache Nodes
+ returned: always
+ type: int
+ sample: 1
+ pending_modified_values:
+ description: Values that are pending modification
+ returned: always
+ type: complex
+ contains: {}
+ preferred_availability_zone:
+ description: Preferred Availability Zone
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ preferred_maintenance_window:
+ description: Time slot for preferred maintenance window
+ returned: always
+ type: str
+ sample: sat:12:00-sat:13:00
+ replication_group_id:
+ description: Replication Group Id
+ returned: always
+ type: str
+ sample: replication-001
+ security_groups:
+ description: List of Security Groups associated with ElastiCache
+ returned: always
+ type: complex
+ contains:
+ security_group_id:
+ description: Security Group ID
+ returned: always
+ type: str
+ sample: sg-abcd1234
+ status:
+ description: Status of Security Group
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the ElastiCache cluster
+ returned: always
+ type: complex
+ contains: {}
+ sample:
+ Application: web
+ Environment: test
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def describe_cache_clusters_with_backoff(client, cluster_id=None):
+ paginator = client.get_paginator('describe_cache_clusters')
+ params = dict(ShowCacheNodeInfo=True)
+ if cluster_id:
+ params['CacheClusterId'] = cluster_id
+ try:
+ response = paginator.paginate(**params).build_full_result()
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'CacheClusterNotFound':
+ return []
+ raise
+ return response['CacheClusters']
+
+
+@AWSRetry.exponential_backoff()
+def get_elasticache_tags_with_backoff(client, cluster_id):
+ return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
+
+
+def get_aws_account_id(module):
+ try:
+ client = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Can't authorize connection")
+
+ try:
+ return client.get_caller_identity()['Account']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+
+
+def get_elasticache_clusters(client, module):
+ region = module.region
+ try:
+ clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
+
+ account_id = get_aws_account_id(module)
+ results = []
+ for cluster in clusters:
+
+ cluster = camel_dict_to_snake_dict(cluster)
+ arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+ try:
+ tags = get_elasticache_tags_with_backoff(client, arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
+
+ cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ results.append(cluster)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'elasticache_facts':
+ module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('elasticache')
+
+ module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_info.py
new file mode 100644
index 00000000..5b22c5ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_info.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: elasticache_info
+short_description: Retrieve information for AWS ElastiCache clusters
+version_added: 1.0.0
+description:
+ - Retrieve information from AWS ElastiCache clusters
+ - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change.
+options:
+ name:
+ description:
+ - The name of an ElastiCache cluster.
+ type: str
+
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all ElastiCache information
+ community.aws.elasticache_info:
+
+- name: obtain all information for a single ElastiCache cluster
+ community.aws.elasticache_info:
+ name: test_elasticache
+'''
+
+RETURN = '''
+elasticache_clusters:
+ description: List of ElastiCache clusters
+ returned: always
+ type: complex
+ contains:
+ auto_minor_version_upgrade:
+ description: Whether to automatically upgrade to minor versions
+ returned: always
+ type: bool
+ sample: true
+ cache_cluster_create_time:
+ description: Date and time cluster was created
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_cluster_id:
+ description: ID of the cache cluster
+ returned: always
+ type: str
+ sample: abcd-1234-001
+ cache_cluster_status:
+ description: Status of ElastiCache cluster
+ returned: always
+ type: str
+ sample: available
+ cache_node_type:
+ description: Instance type of ElastiCache nodes
+ returned: always
+ type: str
+ sample: cache.t2.micro
+ cache_nodes:
+ description: List of ElastiCache nodes in the cluster
+ returned: always
+ type: complex
+ contains:
+ cache_node_create_time:
+ description: Date and time node was created
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_node_id:
+ description: ID of the cache node
+ returned: always
+ type: str
+ sample: '0001'
+ cache_node_status:
+ description: Status of the cache node
+ returned: always
+ type: str
+ sample: available
+ customer_availability_zone:
+ description: Availability Zone in which the cache node was created
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ endpoint:
+ description: Connection details for the cache node
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: URL of the cache node endpoint
+ returned: always
+ type: str
+ sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
+ port:
+ description: Port of the cache node endpoint
+ returned: always
+ type: int
+ sample: 6379
+ parameter_group_status:
+ description: Status of the Cache Parameter Group
+ returned: always
+ type: str
+ sample: in-sync
+ cache_parameter_group:
+ description: Contents of the Cache Parameter Group
+ returned: always
+ type: complex
+ contains:
+ cache_node_ids_to_reboot:
+ description: Cache nodes which need to be rebooted for parameter changes to be applied
+ returned: always
+ type: list
+ sample: []
+ cache_parameter_group_name:
+ description: Name of the cache parameter group
+ returned: always
+ type: str
+ sample: default.redis3.2
+ parameter_apply_status:
+ description: Status of parameter updates
+ returned: always
+ type: str
+ sample: in-sync
+ cache_security_groups:
+ description: Security Groups used by the cache
+ returned: always
+ type: list
+ sample:
+ - 'sg-abcd1234'
+ cache_subnet_group_name:
+ description: ElastiCache Subnet Group used by the cache
+ returned: always
+ type: str
+ sample: abcd-subnet-group
+ client_download_landing_page:
+ description: URL of client download web page
+ returned: always
+ type: str
+ sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
+ engine:
+ description: Engine used by ElastiCache
+ returned: always
+ type: str
+ sample: redis
+ engine_version:
+ description: Version of ElastiCache engine
+ returned: always
+ type: str
+ sample: 3.2.4
+ notification_configuration:
+ description: Configuration of notifications
+ returned: if notifications are enabled
+ type: complex
+ contains:
+ topic_arn:
+ description: ARN of notification destination topic
+ returned: if notifications are enabled
+ type: str
+ sample: arn:aws:sns:*:123456789012:my_topic
+ topic_name:
+ description: Name of notification destination topic
+ returned: if notifications are enabled
+ type: str
+ sample: MyTopic
+ num_cache_nodes:
+ description: Number of Cache Nodes
+ returned: always
+ type: int
+ sample: 1
+ pending_modified_values:
+ description: Values that are pending modification
+ returned: always
+ type: complex
+ contains: {}
+ preferred_availability_zone:
+ description: Preferred Availability Zone
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ preferred_maintenance_window:
+ description: Time slot for preferred maintenance window
+ returned: always
+ type: str
+ sample: sat:12:00-sat:13:00
+ replication_group_id:
+ description: Replication Group Id
+ returned: always
+ type: str
+ sample: replication-001
+ security_groups:
+ description: List of Security Groups associated with ElastiCache
+ returned: always
+ type: complex
+ contains:
+ security_group_id:
+ description: Security Group ID
+ returned: always
+ type: str
+ sample: sg-abcd1234
+ status:
+ description: Status of Security Group
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the ElastiCache cluster
+ returned: always
+ type: complex
+ contains: {}
+ sample:
+ Application: web
+ Environment: test
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
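+# Retry ElastiCache API calls with exponential backoff to ride out transient throttling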
+@AWSRetry.exponential_backoff()
+def describe_cache_clusters_with_backoff(client, cluster_id=None):
+ paginator = client.get_paginator('describe_cache_clusters')
+ params = dict(ShowCacheNodeInfo=True)
+ if cluster_id:
+ params['CacheClusterId'] = cluster_id
+ try:
+ response = paginator.paginate(**params).build_full_result()
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'CacheClusterNotFound':
+ return []
+ raise
+ except botocore.exceptions.BotoCoreError:
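+ # connection-level errors are deliberately re-raised unchanged for the caller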
+ raise
+ return response['CacheClusters']
+
+
+@AWSRetry.exponential_backoff()
+def get_elasticache_tags_with_backoff(client, cluster_id):
+ return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
+
+
+def get_aws_account_id(module):
+ try:
+ client = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Can't authorize connection")
+
+ try:
+ return client.get_caller_identity()['Account']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+
+
+def get_elasticache_clusters(client, module):
+ region = module.region
+ try:
+ clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
+
+ account_id = get_aws_account_id(module)
+ results = []
+ for cluster in clusters:
+
+ cluster = camel_dict_to_snake_dict(cluster)
+ arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+ try:
+ tags = get_elasticache_tags_with_backoff(client, arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
+
+ cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ results.append(cluster)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'elasticache_facts':
+ module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('elasticache')
+
+ module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
new file mode 100644
index 00000000..1e9c5741
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_parameter_group
+version_added: 1.0.0
+short_description: Manage cache parameter groups in Amazon ElastiCache
+description:
+ - Manage cache parameter groups in Amazon ElastiCache.
+ - Returns information about the specified cache parameter group.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3, botocore ]
+options:
+ group_family:
+ description:
+ - The name of the cache parameter group family that the cache parameter group can be used with.
+ Required when creating a cache parameter group.
+ choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']
+ type: str
+ name:
+ description:
+ - A user-specified name for the cache parameter group.
+ required: yes
+ type: str
+ description:
+ description:
+ - A user-specified description for the cache parameter group.
+ type: str
+ state:
+ description:
+ - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed.
+ choices: ['present', 'absent', 'reset']
+ required: true
+ type: str
+ values:
+ description:
+ - A user-specified dictionary of parameters to reset or modify for the cache parameter group.
+ type: dict
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: 'Create a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ group_family: 'redis3.2'
+ description: 'This is a cache parameter group'
+ state: 'present'
+ - name: 'Modify a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ values:
+ activerehashing: yes
+ client-output-buffer-limit-normal-hard-limit: 4
+ state: 'present'
+ - name: 'Reset all modifiable parameters for the test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ state: reset
+ - name: 'Delete a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ state: 'absent'
+"""
+
+RETURN = """
+elasticache:
+ description: cache parameter group information and response metadata
+ returned: always
+ type: dict
+ sample:
+ cache_parameter_group:
+ cache_parameter_group_family: redis3.2
+ cache_parameter_group_name: test-please-delete
+ description: "initial description"
+ response_metadata:
+ http_headers:
+ content-length: "562"
+ content-type: text/xml
+ date: "Mon, 06 Feb 2017 22:14:08 GMT"
+ x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
+ http_status_code: 200
+ request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
+ retry_attempts: 0
+changed:
+ description: if the cache parameter group has changed
+ returned: always
+ type: bool
+ sample:
+ changed: true
+"""
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def create(module, conn, name, group_family, description):
+ """ Create ElastiCache parameter group. """
+ try:
+ response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ return response, changed
+
+
+def delete(module, conn, name):
+ """ Delete ElastiCache parameter group. """
+ try:
+ conn.delete_cache_parameter_group(CacheParameterGroupName=name)
+ response = {}
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ return response, changed
+
+
+def make_current_modifiable_param_dict(module, conn, name):
+ """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
+ current_info = get_info(conn, name)
+ if current_info is False:
+ module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
+
+ parameters = current_info["Parameters"]
+ modifiable_params = {}
+
+ for param in parameters:
+ if param["IsModifiable"]:
+ modifiable_params[param["ParameterName"]] = [param.get("AllowedValues")]
+ modifiable_params[param["ParameterName"]].append(param["DataType"])
+ modifiable_params[param["ParameterName"]].append(param.get("ParameterValue"))
+ return modifiable_params
+
+
+def check_valid_modification(module, values, modifiable_params):
+ """ Check if the parameters and values in values are valid. """
+ changed_with_update = False
+
+ for parameter in values:
+ new_value = values[parameter]
+
+ # check valid modifiable parameters
+ if parameter not in modifiable_params:
+ module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
+
+ # check allowed datatype for modified parameters
+ str_to_type = {"integer": int, "string": string_types}
+ expected_type = str_to_type[modifiable_params[parameter][1]]
+ if not isinstance(new_value, expected_type):
+ if expected_type == string_types:
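+ # coerce booleans to the "yes"/"no" strings the ElastiCache API expects for string parameters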
+ if isinstance(new_value, bool):
+ values[parameter] = "yes" if new_value else "no"
+ else:
+ values[parameter] = to_text(new_value)
+ elif expected_type == int:
+ if isinstance(new_value, bool):
+ values[parameter] = 1 if new_value else 0
+ else:
+ module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
+ (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+ else:
+ module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
+ (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+
+ # check allowed values for modifiable parameters
+ choices = modifiable_params[parameter][0]
+ if choices:
+ if not (to_text(new_value) in choices or isinstance(new_value, int)):
+ module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
+ (new_value, parameter, choices))
+
+ # check if a new value is different from current value
+ if to_text(values[parameter]) != modifiable_params[parameter][2]:
+ changed_with_update = True
+
+ return changed_with_update, values
+
+
+def check_changed_parameter_values(values, old_parameters, new_parameters):
+ """ Checking if the new values are different than the old values. """
+ changed_with_update = False
+
+ # if the user specified parameters to reset, only check those for change
+ if values:
+ for parameter in values:
+ if old_parameters[parameter] != new_parameters[parameter]:
+ changed_with_update = True
+ break
+ # otherwise check all to find a change
+ else:
+ for parameter in old_parameters:
+ if old_parameters[parameter] != new_parameters[parameter]:
+ changed_with_update = True
+ break
+
+ return changed_with_update
+
+
+def modify(module, conn, name, values):
+ """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
+ # format the user-supplied parameters into the ParameterNameValues structure the API expects
+ format_parameters = []
+ for key in values:
+ value = to_text(values[key])
+ format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ try:
+ response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ return response
+
+
+def reset(module, conn, name, values):
+ """ Reset ElastiCache parameter group if the current information is different from the new information. """
+ # used to compare with the reset parameters' dict to see if there have been changes
+ old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
+
+ format_parameters = []
+
+ # determine whether to reset all or specific parameters
+ if values:
+ all_parameters = False
+ format_parameters = []
+ for key in values:
+ value = to_text(values[key])
+ format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ else:
+ all_parameters = True
+
+ try:
+ response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ # determine changed
+ new_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
+ changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict)
+
+ return response, changed
+
+
+def get_info(conn, name):
+ """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
+ try:
+ data = conn.describe_cache_parameters(CacheParameterGroupName=name)
+ return data
+ except botocore.exceptions.ClientError:
+ return False
+
+
+def main():
+ argument_spec = dict(
+ group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']),
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ state=dict(required=True, choices=['present', 'absent', 'reset']),
+ values=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ parameter_group_family = module.params.get('group_family')
+ parameter_group_name = module.params.get('name')
+ group_description = module.params.get('description')
+ state = module.params.get('state')
+ values = module.params.get('values')
+
+ try:
+ connection = module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ exists = get_info(connection, parameter_group_name)
+
+ # check that the needed requirements are available
+ if state == 'present' and not (exists or parameter_group_family):
+ module.fail_json(msg="Creating a group requires a family group.")
+ elif state == 'reset' and not exists:
+ module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
+
+ # Taking action
+ changed = False
+ if state == 'present':
+ if exists:
+ # confirm that the group exists without any actions
+ if not values:
+ response = exists
+ changed = False
+ # modify existing group
+ else:
+ modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
+ changed, values = check_valid_modification(module, values, modifiable_params)
+ response = modify(module, connection, parameter_group_name, values)
+ # create group
+ else:
+ response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
+ if values:
+ modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
+ changed, values = check_valid_modification(module, values, modifiable_params)
+ response = modify(module, connection, parameter_group_name, values)
+ elif state == 'absent':
+ if exists:
+ # delete group
+ response, changed = delete(module, connection, parameter_group_name)
+ else:
+ response = {}
+ changed = False
+ elif state == 'reset':
+ response, changed = reset(module, connection, parameter_group_name, values)
+
+ facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
new file mode 100644
index 00000000..dc92df6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_snapshot
+version_added: 1.0.0
+short_description: Manage cache snapshots in Amazon ElastiCache
+description:
+ - Manage cache snapshots in Amazon ElastiCache.
+ - Returns information about the specified snapshot.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3, botocore ]
+options:
+ name:
+ description:
+ - The name of the snapshot we want to create, copy, or delete.
+ required: true
+ type: str
+ state:
+ description:
+ - Actions that will create, destroy, or copy a snapshot.
+ required: true
+ choices: ['present', 'absent', 'copy']
+ type: str
+ replication_id:
+ description:
+ - The name of the existing replication group to make the snapshot.
+ type: str
+ cluster_id:
+ description:
+ - The name of an existing cache cluster in the replication group to make the snapshot.
+ type: str
+ target:
+ description:
+ - The name of a snapshot copy.
+ type: str
+ bucket:
+ description:
+ - The S3 bucket to which the snapshot is exported.
+ type: str
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: 'Create a snapshot'
+ community.aws.elasticache_snapshot:
+ name: 'test-snapshot'
+ state: 'present'
+ cluster_id: '{{ cluster }}'
+ replication_id: '{{ replication }}'
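+
+# The tasks below sketch the 'copy' and 'absent' states; the target and
+# bucket names are illustrative placeholders.
+- name: 'Copy a snapshot to an S3 bucket'
+ community.aws.elasticache_snapshot:
+ name: 'test-snapshot'
+ state: 'copy'
+ target: 'test-snapshot-copy'
+ bucket: 'my-backup-bucket'
+
+- name: 'Delete a snapshot'
+ community.aws.elasticache_snapshot:
+ name: 'test-snapshot'
+ state: 'absent'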
+"""
+
+RETURN = """
+response_metadata:
+ description: response metadata about the snapshot
+ returned: always
+ type: dict
+ sample:
+ http_headers:
+ content-length: 1490
+ content-type: text/xml
+ date: 'Tue, 07 Feb 2017 16:43:04 GMT'
+ x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
+ http_status_code: 200
+ request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
+ retry_attempts: 0
+snapshot:
+ description: snapshot data
+ returned: always
+ type: dict
+ sample:
+ auto_minor_version_upgrade: true
+ cache_cluster_create_time: '2017-02-01T17:43:58.261000+00:00'
+ cache_cluster_id: test-please-delete
+ cache_node_type: cache.m1.small
+ cache_parameter_group_name: default.redis3.2
+ cache_subnet_group_name: default
+ engine: redis
+ engine_version: 3.2.4
+ node_snapshots:
+ cache_node_create_time: '2017-02-01T17:43:58.261000+00:00'
+ cache_node_id: 0001
+ cache_size:
+ num_cache_nodes: 1
+ port: 11211
+ preferred_availability_zone: us-east-1d
+ preferred_maintenance_window: wed:03:00-wed:04:00
+ snapshot_name: deletesnapshot
+ snapshot_retention_limit: 0
+ snapshot_source: manual
+ snapshot_status: creating
+ snapshot_window: 10:00-11:00
+ vpc_id: vpc-c248fda4
+changed:
+ description: if a snapshot has been created, deleted, or copied
+ returned: always
+ type: bool
+ sample:
+ changed: true
+"""
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def create(module, connection, replication_id, cluster_id, name):
+ """ Create an ElastiCache backup. """
+ try:
+ response = connection.create_snapshot(ReplicationGroupId=replication_id,
+ CacheClusterId=cluster_id,
+ SnapshotName=name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault":
+ response = {}
+ changed = False
+ else:
+ module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc())
+ return response, changed
+
+
+def copy(module, connection, name, target, bucket):
+ """ Copy an ElastiCache backup. """
+ try:
+ response = connection.copy_snapshot(SourceSnapshotName=name,
+ TargetSnapshotName=target,
+ TargetBucket=bucket)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc())
+ return response, changed
+
+
+def delete(module, connection, name):
+ """ Delete an ElastiCache backup. """
+ try:
+ response = connection.delete_snapshot(SnapshotName=name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == "SnapshotNotFoundFault":
+ response = {}
+ changed = False
+ elif e.response['Error']['Code'] == "InvalidSnapshotState":
+ module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
+ "You may need to wait a few minutes.")
+ else:
+ module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc())
+ return response, changed
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
+ replication_id=dict(type='str'),
+ cluster_id=dict(type='str'),
+ target=dict(type='str'),
+ bucket=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ replication_id = module.params.get('replication_id')
+ cluster_id = module.params.get('cluster_id')
+ target = module.params.get('target')
+ bucket = module.params.get('bucket')
+
+ try:
+ connection = module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ changed = False
+ response = {}
+
+ if state == 'present':
+ if not all((replication_id, cluster_id)):
+ module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
+ response, changed = create(module, connection, replication_id, cluster_id, name)
+ elif state == 'absent':
+ response, changed = delete(module, connection, name)
+ elif state == 'copy':
+ if not all((target, bucket)):
+ module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
+ response, changed = copy(module, connection, name, target, bucket)
+
+ facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
new file mode 100644
index 00000000..ab25e294
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elasticache_subnet_group
+version_added: 1.0.0
+short_description: Manage ElastiCache subnet groups
+description:
+ - Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5.
+options:
+ state:
+ description:
+ - Specifies whether the subnet group should be present or absent.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ name:
+ description:
+ - Cache subnet group identifier.
+ required: true
+ type: str
+ description:
+ description:
+ - ElastiCache subnet group description. Only set when a new group is added.
+ type: str
+ subnets:
+ description:
+ - List of subnet IDs that make up the ElastiCache subnet group.
+ type: list
+ elements: str
+author: "Tim Mahoney (@timmahoney)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Add or change a subnet group
+ community.aws.elasticache_subnet_group:
+ state: present
+ name: norwegian-blue
+ description: My Fancy Ex Parrot Subnet Group
+ subnets:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+
+- name: Remove a subnet group
+ community.aws.elasticache_subnet_group:
+ state: absent
+ name: norwegian-blue
+'''
+
+try:
+ import boto
+ from boto.elasticache import connect_to_region
+ from boto.exception import BotoServerError
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list', elements='str'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ state = module.params.get('state')
+ group_name = module.params.get('name').lower()
+ group_description = module.params.get('description')
+ group_subnets = module.params.get('subnets') or {}
+
+ if state == 'present':
+ for required in ['name', 'description', 'subnets']:
+ if not module.params.get(required):
+ module.fail_json(msg=str("Parameter %s required for state='present'" % required))
+ else:
+ for not_allowed in ['description', 'subnets']:
+ if module.params.get(not_allowed):
+ module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
+
+ # Retrieve any AWS settings from the environment.
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ if not region:
+ module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+
+ """Get an elasticache connection"""
+ try:
+ conn = connect_to_region(region_name=region, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=to_native(e))
+
+ try:
+ changed = False
+ exists = False
+
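+ # probe for an existing subnet group; a CacheSubnetGroupNotFoundFault below just means it does not exist yet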
+ try:
+ matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
+ exists = len(matching_groups) > 0
+ except BotoServerError as e:
+ if e.error_code != 'CacheSubnetGroupNotFoundFault':
+ module.fail_json(msg=e.error_message)
+
+ if state == 'absent':
+ if exists:
+ conn.delete_cache_subnet_group(group_name)
+ changed = True
+ else:
+ if not exists:
+ conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
+ changed = True
+ else:
+ conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
+ changed = True
+
+ except BotoServerError as e:
+ if e.error_message != 'No modifications were requested.':
+ module.fail_json(msg=e.error_message)
+ else:
+ changed = False
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb.py
new file mode 100644
index 00000000..f18a0081
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb.py
@@ -0,0 +1,665 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_application_lb
+version_added: 1.0.0
+short_description: Manage an Application Load Balancer
+description:
+ - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ access_logs_enabled:
+ description:
+ - Whether or not to enable access logs.
+ - When set, I(access_logs_s3_bucket) must also be set.
+ type: bool
+ access_logs_s3_bucket:
+ description:
+ - The name of the S3 bucket for the access logs.
+ - The bucket must exist in the same
+ region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
+ - Required if access logs in Amazon S3 are enabled.
+ - When set, I(access_logs_enabled) must also be set.
+ type: str
+ access_logs_s3_prefix:
+ description:
+ - The prefix for the log location in the S3 bucket.
+ - If you don't specify a prefix, the access logs are stored in the root of the bucket.
+ - Cannot begin or end with a slash.
+ type: str
+ deletion_protection:
+ description:
+ - Indicates whether deletion protection for the ELB is enabled.
+ - Defaults to C(false).
+ type: bool
+ http2:
+ description:
+ - Indicates whether to enable HTTP2 routing.
+ - Defaults to C(false).
+ type: bool
+ idle_timeout:
+ description:
+ - The number of seconds to wait before an idle connection is closed.
+ type: int
+ listeners:
+ description:
+ - A list of dicts containing listeners to attach to the ELB. See the examples for details of the required dict. Note that listener keys
+ are CamelCased.
+ type: list
+ elements: dict
+ suboptions:
+ Port:
+ description: The port on which the load balancer is listening.
+ required: true
+ type: int
+ Protocol:
+ description: The protocol for connections from clients to the load balancer.
+ required: true
+ type: str
+ Certificates:
+ description: The SSL server certificate.
+ type: list
+ elements: dict
+ suboptions:
+ CertificateArn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ type: str
+ SslPolicy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ type: str
+ DefaultActions:
+ description: The default actions for the listener.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ Type:
+ description: The type of action.
+ type: str
+ TargetGroupArn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ type: str
+ Rules:
+ type: list
+ elements: dict
+ description:
+ - A list of ALB Listener Rules.
+ - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:'
+ - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule'
+ suboptions:
+ Conditions:
+ type: list
+ description: Conditions which must be met for the actions to be applied.
+ Priority:
+ type: int
+ description: The rule priority.
+ Actions:
+ type: list
+ description: Actions to apply if all of the rule's conditions are met.
+ name:
+ description:
+ - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
+ characters or hyphens, and must not begin or end with a hyphen.
+ required: true
+ type: str
+ purge_listeners:
+ description:
+ - If C(yes), existing listeners will be purged from the ELB to match exactly what is defined by the I(listeners) parameter.
+ - If the I(listeners) parameter is not set then listeners will not be modified.
+ default: yes
+ type: bool
+ purge_tags:
+ description:
+ - If C(yes), existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter.
+ - If the I(tags) parameter is not set then tags will not be modified.
+ default: yes
+ type: bool
+ subnets:
+ description:
+ - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
+ at least two Availability Zones.
+ - Required if I(state=present).
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - A list of the names or IDs of the security groups to assign to the load balancer.
+ - Required if I(state=present).
+ default: []
+ type: list
+ elements: str
+ scheme:
+ description:
+ - Internet-facing or internal load balancer. An ELB scheme cannot be modified after creation.
+ default: internet-facing
+ choices: [ 'internet-facing', 'internal' ]
+ type: str
+ state:
+ description:
+ - Create or destroy the load balancer.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - A dictionary of one or more tags to assign to the load balancer.
+ type: dict
+ wait:
+ description:
+ - Wait for the load balancer to have a state of 'active' before completing. A status check is
+ performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
+ default: no
+ type: bool
+ wait_timeout:
+ description:
+ - The time in seconds to use in conjunction with I(wait).
+ type: int
+ purge_rules:
+ description:
+ - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete.
+ default: yes
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
+ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ELB and attach a listener
+- community.aws.elb_application_lb:
+ name: myelb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+ Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+ - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
+# Create an ELB and attach a listener with logging enabled
+- community.aws.elb_application_lb:
+ access_logs_enabled: yes
+ access_logs_s3_bucket: mybucket
+ access_logs_s3_prefix: "logs"
+ name: myelb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+ Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+ - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
+# Create an ALB with listeners and rules
+- community.aws.elb_application_lb:
+ name: test-alb
+ subnets:
+ - subnet-12345678
+ - subnet-87654321
+ security_groups:
+ - sg-12345678
+ scheme: internal
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: test-target-group
+ Certificates:
+ - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
+ SslPolicy: ELBSecurityPolicy-2015-05
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - '/test'
+ Priority: '1'
+ Actions:
+ - TargetGroupName: test-target-group
+ Type: forward
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/redirect-path/*"
+ Priority: '2'
+ Actions:
+ - Type: redirect
+ RedirectConfig:
+ Host: "#{host}"
+ Path: "/example/redir" # or /#{path}
+ Port: "#{port}"
+ Protocol: "#{protocol}"
+ Query: "#{query}"
+ StatusCode: "HTTP_302" # or HTTP_301
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/fixed-response-path/"
+ Priority: '3'
+ Actions:
+ - Type: fixed-response
+ FixedResponseConfig:
+ ContentType: "text/plain"
+ MessageBody: "This is the page you're looking for"
+ StatusCode: "200"
+ - Conditions:
+ - Field: host-header
+ Values:
+ - "hostname.domain.com"
+ - "alternate.domain.com"
+ Priority: '4'
+ Actions:
+ - TargetGroupName: test-target-group
+ Type: forward
+ state: present
+
+# Remove an ELB
+- community.aws.elb_application_lb:
+ name: myelb
+ state: absent
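+
+# Create an internal ELB with non-default attributes; the names and values
+# below are illustrative placeholders, not required settings
+- community.aws.elb_application_lb:
+ name: myelb
+ scheme: internal
+ deletion_protection: yes
+ http2: yes
+ idle_timeout: 120
+ security_groups:
+ - sg-12345678
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: my-target-group
+ state: present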
+
+'''
+
+RETURN = r'''
+access_logs_s3_bucket:
+ description: The name of the S3 bucket for the access logs.
+ returned: when state is present
+ type: str
+ sample: mys3bucket
+access_logs_s3_enabled:
+ description: Indicates whether access logs stored in Amazon S3 are enabled.
+ returned: when state is present
+ type: str
+ sample: true
+access_logs_s3_prefix:
+ description: The prefix for the location in the S3 bucket.
+ returned: when state is present
+ type: str
+ sample: my/logs
+availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when state is present
+ type: list
+ sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
+canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when state is present
+ type: str
+ sample: ABCDEF12345678
+created_time:
+ description: The date and time the load balancer was created.
+ returned: when state is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when state is present
+ type: int
+ sample: 60
+ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when state is present
+ type: str
+ sample: ipv4
+listeners:
+ description: Information about the listeners.
+ returned: when state is present
+ type: complex
+ contains:
+ listener_arn:
+ description: The Amazon Resource Name (ARN) of the listener.
+ returned: when state is present
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: ""
+ port:
+ description: The port on which the load balancer is listening.
+ returned: when state is present
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol for connections from clients to the load balancer.
+ returned: when state is present
+ type: str
+ sample: HTTPS
+ certificates:
+ description: The SSL server certificate.
+ returned: when state is present
+ type: complex
+ contains:
+ certificate_arn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ returned: when state is present
+ type: str
+ sample: ""
+ ssl_policy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ returned: when state is present
+ type: str
+ sample: ""
+ default_actions:
+ description: The default actions for the listener.
+ returned: when state is present
+ type: complex
+ contains:
+ type:
+ description: The type of action.
+ returned: when state is present
+ type: str
+ sample: ""
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state is present
+ type: str
+ sample: ""
+load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+load_balancer_name:
+ description: The name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: my-elb
+routing_http2_enabled:
+ description: Indicates whether HTTP/2 is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when state is present
+ type: str
+ sample: internal
+security_groups:
+ description: The IDs of the security groups for the load balancer.
+ returned: when state is present
+ type: list
+ sample: ['sg-0011223344']
+state:
+ description: The state of the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{'code': 'active'}"
+tags:
+ description: The tags attached to the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+type:
+ description: The type of load balancer.
+ returned: when state is present
+ type: str
+ sample: application
+vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when state is present
+ type: str
+ sample: vpc-0011223344
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
+
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import (
+ ApplicationLoadBalancer,
+ ELBListener,
+ ELBListenerRule,
+ ELBListenerRules,
+ ELBListeners,
+)
+from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules
+
+
+def create_or_update_elb(elb_obj):
+ """Create ELB or modify main attributes. json_exit here"""
+
+ if elb_obj.elb:
+ # ELB exists so check subnets, security groups and tags match what has been passed
+
+ # Subnets
+ if not elb_obj.compare_subnets():
+ elb_obj.modify_subnets()
+
+ # Security Groups
+ if not elb_obj.compare_security_groups():
+ elb_obj.modify_security_groups()
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if elb_obj.tags is not None:
+
+ # Delete necessary tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
+ boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
+ if tags_to_delete:
+ elb_obj.delete_tags(tags_to_delete)
+
+ # Add/update tags
+ if tags_need_modify:
+ elb_obj.modify_tags()
+
+ else:
+ # Create load balancer
+ elb_obj.create_elb()
+
+ # ELB attributes
+ elb_obj.update_elb_attributes()
+ elb_obj.modify_elb_attributes()
+
+ # Listeners
+ listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
+
+ listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
+
+ # Delete listeners
+ for listener_to_delete in listeners_to_delete:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.delete()
+ listeners_obj.changed = True
+
+ # Add listeners
+ for listener_to_add in listeners_to_add:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.add()
+ listeners_obj.changed = True
+
+ # Modify listeners
+ for listener_to_modify in listeners_to_modify:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.modify()
+ listeners_obj.changed = True
+
+ # If listeners changed, mark ELB as changed
+ if listeners_obj.changed:
+ elb_obj.changed = True
+
+ # Rules of each listener
+ for listener in listeners_obj.listeners:
+ if 'Rules' in listener:
+ rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port'])
+
+ rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules()
+
+ # Delete rules
+ if elb_obj.module.params['purge_rules']:
+ for rule in rules_to_delete:
+ rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn)
+ rule_obj.delete()
+ elb_obj.changed = True
+
+ # Add rules
+ for rule in rules_to_add:
+ rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
+ rule_obj.create()
+ elb_obj.changed = True
+
+ # Modify rules
+ for rule in rules_to_modify:
+ rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
+ rule_obj.modify()
+ elb_obj.changed = True
+
+ # Get the ELB again
+ elb_obj.update()
+
+ # Get the ELB listeners again
+ listeners_obj.update()
+
+ # Update the ELB attributes
+ elb_obj.update_elb_attributes()
+
+ # Convert to snake_case and merge in everything we want to return to the user
+ snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
+ snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
+ snaked_elb['listeners'] = []
+ for listener in listeners_obj.current_listeners:
+ # For each listener, get listener rules
+ listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn'])
+ snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
+
+ # Change tags to ansible friendly dict
+ snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
+
+ elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb)
+
+
+def delete_elb(elb_obj):
+
+ if elb_obj.elb:
+ elb_obj.delete()
+
+ elb_obj.module.exit_json(changed=elb_obj.changed)
+
+
+def main():
+
+ argument_spec = dict(
+ access_logs_enabled=dict(type='bool'),
+ access_logs_s3_bucket=dict(type='str'),
+ access_logs_s3_prefix=dict(type='str'),
+ deletion_protection=dict(type='bool'),
+ http2=dict(type='bool'),
+ idle_timeout=dict(type='int'),
+ listeners=dict(type='list',
+ elements='dict',
+ options=dict(
+ Protocol=dict(type='str', required=True),
+ Port=dict(type='int', required=True),
+ SslPolicy=dict(type='str'),
+ Certificates=dict(type='list', elements='dict'),
+ DefaultActions=dict(type='list', required=True, elements='dict'),
+ Rules=dict(type='list', elements='dict')
+ )
+ ),
+ name=dict(required=True, type='str'),
+ purge_listeners=dict(default=True, type='bool'),
+ purge_tags=dict(default=True, type='bool'),
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ tags=dict(type='dict'),
+ wait_timeout=dict(type='int'),
+ wait=dict(default=False, type='bool'),
+ purge_rules=dict(default=True, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['subnets', 'security_groups'])
+ ],
+ required_together=[
+ ['access_logs_enabled', 'access_logs_s3_bucket']
+ ]
+ )
+
+ # Quick check of listeners parameters
+ listeners = module.params.get("listeners")
+ if listeners is not None:
+ for listener in listeners:
+ for key in listener.keys():
+ if key == 'Protocol' and listener[key] == 'HTTPS':
+ if listener.get('SslPolicy') is None:
+ module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")
+
+ if listener.get('Certificates') is None:
+ module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS")
+
+ connection = module.client('elbv2')
+ connection_ec2 = module.client('ec2')
+
+ state = module.params.get("state")
+
+ elb = ApplicationLoadBalancer(connection, connection_ec2, module)
+
+ if state == 'present':
+ create_or_update_elb(elb)
+ else:
+ delete_elb(elb)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_facts.py
new file mode 100644
index 00000000..06e1f3ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_facts.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_application_lb_info
+version_added: 1.0.0
+short_description: Gather information about application ELBs in AWS
+description:
+ - Gather information about application ELBs in AWS
+ - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+ load_balancer_arns:
+ description:
+ - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the load balancers.
+ required: false
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all application load balancers
+ community.aws.elb_application_lb_info:
+
+- name: Gather information about a particular application load balancer using its ARN
+ community.aws.elb_application_lb_info:
+ load_balancer_arns:
+ - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+- name: Gather information about load balancers named 'elb1' and 'elb2'
+ community.aws.elb_application_lb_info:
+ names:
+ - elb1
+ - elb2
+
+- name: Gather information about specific ALB
+ community.aws.elb_application_lb_info:
+ names: "alb-name"
+ region: "aws-region"
+ register: alb_info
+- ansible.builtin.debug:
+ var: alb_info
+'''
+
+RETURN = r'''
+load_balancers:
+ description: a list of load balancers
+ returned: always
+ type: complex
+ contains:
+ access_logs_s3_bucket:
+ description: The name of the S3 bucket for the access logs.
+ returned: when status is present
+ type: str
+ sample: mys3bucket
+ access_logs_s3_enabled:
+ description: Indicates whether access logs stored in Amazon S3 are enabled.
+ returned: when status is present
+ type: str
+ sample: true
+ access_logs_s3_prefix:
+ description: The prefix for the location in the S3 bucket.
+ returned: when status is present
+ type: str
+ sample: /my/logs
+ availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when status is present
+ type: list
+ sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
+ canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when status is present
+ type: str
+ sample: ABCDEF12345678
+ created_time:
+ description: The date and time the load balancer was created.
+ returned: when status is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+ deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when status is present
+ type: str
+ sample: true
+ dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when status is present
+ type: str
+ sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+ idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when status is present
+ type: str
+ sample: 60
+ ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when status is present
+ type: str
+ sample: ipv4
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when status is present
+ type: str
+ sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+ load_balancer_name:
+ description: The name of the load balancer.
+ returned: when status is present
+ type: str
+ sample: my-elb
+ scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when status is present
+ type: str
+ sample: internal
+ security_groups:
+ description: The IDs of the security groups for the load balancer.
+ returned: when status is present
+ type: list
+ sample: ['sg-0011223344']
+ state:
+ description: The state of the load balancer.
+ returned: when status is present
+ type: dict
+ sample: "{'code': 'active'}"
+ tags:
+ description: The tags attached to the load balancer.
+ returned: when status is present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ type:
+ description: The type of load balancer.
+ returned: when status is present
+ type: str
+ sample: application
+ vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when status is present
+ type: str
+ sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_elb_listeners(connection, module, elb_arn):
+
+ try:
+ return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe elb listeners")
+
+
+def get_listener_rules(connection, module, listener_arn):
+
+ try:
+ return connection.describe_rules(ListenerArn=listener_arn)['Rules']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe listener rules")
+
+
+def get_load_balancer_attributes(connection, module, load_balancer_arn):
+
+ try:
+ load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ for k, v in list(load_balancer_attributes.items()):
+ load_balancer_attributes[k.replace('.', '_')] = v
+ del load_balancer_attributes[k]
+
+ return load_balancer_attributes
+
+
+def get_load_balancer_tags(connection, module, load_balancer_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer tags")
+
+
+def list_load_balancers(connection, module):
+
+ load_balancer_arns = module.params.get("load_balancer_arns")
+ names = module.params.get("names")
+
+ try:
+ load_balancer_paginator = connection.get_paginator('describe_load_balancers')
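+ # filter by ARNs or names when provided; otherwise page through every load balancer in the region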
+        if load_balancer_arns:
+            load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
+        elif names:
+            load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
+        else:
+            load_balancers = load_balancer_paginator.paginate().build_full_result()
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ module.exit_json(load_balancers=[])
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list load balancers")
+ except NoCredentialsError as e:
+ module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc())
+
+ for load_balancer in load_balancers['LoadBalancers']:
+ # Get the attributes for each elb
+ load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
+
+ # Get the listeners for each elb
+ load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
+
+ # For each listener, get listener rules
+ for listener in load_balancer['listeners']:
+ listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
+
+    # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
+
+ # Get tags for each load balancer
+ for snaked_load_balancer in snaked_load_balancers:
+ snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
+
+ module.exit_json(load_balancers=snaked_load_balancers)
+
+
+def main():
+
+ argument_spec = dict(
+ load_balancer_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arns', 'names']],
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_application_lb_facts':
+ module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_load_balancers(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_info.py
new file mode 100644
index 00000000..06e1f3ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_application_lb_info.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_application_lb_info
+version_added: 1.0.0
+short_description: Gather information about application ELBs in AWS
+description:
+  - Gather information about application ELBs in AWS.
+ - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+ load_balancer_arns:
+ description:
+      - The Amazon Resource Names (ARNs) of the load balancers. You can specify up to 20 load balancers in a single call.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the load balancers.
+ required: false
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all application load balancers
+ community.aws.elb_application_lb_info:
+
+- name: Gather information about a particular application load balancer
+ community.aws.elb_application_lb_info:
+ load_balancer_arns:
+ - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+- name: Gather information about the application load balancers named 'elb1' and 'elb2'
+ community.aws.elb_application_lb_info:
+ names:
+ - elb1
+ - elb2
+
+- name: Gather information about a specific ALB and display the result
+ community.aws.elb_application_lb_info:
+ names: "alb-name"
+ region: "aws-region"
+ register: alb_info
+- ansible.builtin.debug:
+ var: alb_info
+'''
+
+RETURN = r'''
+load_balancers:
+  description: A list of load balancers.
+ returned: always
+ type: complex
+ contains:
+ access_logs_s3_bucket:
+ description: The name of the S3 bucket for the access logs.
+ returned: when status is present
+ type: str
+ sample: mys3bucket
+ access_logs_s3_enabled:
+ description: Indicates whether access logs stored in Amazon S3 are enabled.
+ returned: when status is present
+ type: str
+ sample: true
+ access_logs_s3_prefix:
+ description: The prefix for the location in the S3 bucket.
+ returned: when status is present
+ type: str
+ sample: /my/logs
+ availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when status is present
+ type: list
+ sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
+ canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when status is present
+ type: str
+ sample: ABCDEF12345678
+ created_time:
+ description: The date and time the load balancer was created.
+ returned: when status is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+ deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when status is present
+ type: str
+ sample: true
+ dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when status is present
+ type: str
+ sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+ idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when status is present
+ type: str
+ sample: 60
+ ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when status is present
+ type: str
+ sample: ipv4
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when status is present
+ type: str
+ sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+ load_balancer_name:
+ description: The name of the load balancer.
+ returned: when status is present
+ type: str
+ sample: my-elb
+ scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when status is present
+ type: str
+ sample: internal
+ security_groups:
+ description: The IDs of the security groups for the load balancer.
+ returned: when status is present
+ type: list
+ sample: ['sg-0011223344']
+ state:
+ description: The state of the load balancer.
+ returned: when status is present
+ type: dict
+ sample: "{'code': 'active'}"
+ tags:
+ description: The tags attached to the load balancer.
+ returned: when status is present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ type:
+ description: The type of load balancer.
+ returned: when status is present
+ type: str
+ sample: application
+ vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when status is present
+ type: str
+ sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_elb_listeners(connection, module, elb_arn):
+
+ try:
+ return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe elb listeners")
+
+
+def get_listener_rules(connection, module, listener_arn):
+
+ try:
+ return connection.describe_rules(ListenerArn=listener_arn)['Rules']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe listener rules")
+
+
+def get_load_balancer_attributes(connection, module, load_balancer_arn):
+
+ try:
+ load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+    for k in list(load_balancer_attributes):
+        if '.' in k:
+            load_balancer_attributes[k.replace('.', '_')] = load_balancer_attributes.pop(k)
+
+ return load_balancer_attributes
+
+
+def get_load_balancer_tags(connection, module, load_balancer_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer tags")
+
+
+def list_load_balancers(connection, module):
+
+ load_balancer_arns = module.params.get("load_balancer_arns")
+ names = module.params.get("names")
+
+ try:
+ load_balancer_paginator = connection.get_paginator('describe_load_balancers')
+        if load_balancer_arns:
+            load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
+        elif names:
+            load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
+        else:
+            load_balancers = load_balancer_paginator.paginate().build_full_result()
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ module.exit_json(load_balancers=[])
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list load balancers")
+ except NoCredentialsError as e:
+ module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc())
+
+ for load_balancer in load_balancers['LoadBalancers']:
+ # Get the attributes for each elb
+ load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
+
+ # Get the listeners for each elb
+ load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
+
+ # For each listener, get listener rules
+ for listener in load_balancer['listeners']:
+ listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
+
+    # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
+
+ # Get tags for each load balancer
+ for snaked_load_balancer in snaked_load_balancers:
+ snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
+
+ module.exit_json(load_balancers=snaked_load_balancers)
+
+
+def main():
+
+ argument_spec = dict(
+ load_balancer_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arns', 'names']],
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_application_lb_facts':
+ module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_load_balancers(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb.py
new file mode 100644
index 00000000..cd7d4587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb.py
@@ -0,0 +1,1354 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_classic_lb
+version_added: 1.0.0
+description:
+ - Returns information about the load balancer.
+  - Will be marked changed only if the state of the load balancer changes.
+short_description: Creates or destroys an Amazon ELB.
+author:
+ - "Jim Dalton (@jsdalton)"
+options:
+ state:
+ description:
+ - Create or destroy the ELB.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the ELB.
+ required: true
+ type: str
+ listeners:
+ description:
+ - List of ports/protocols for this ELB to listen on (see example).
+ type: list
+ elements: dict
+ purge_listeners:
+ description:
+ - Purge existing listeners on ELB that are not found in listeners.
+ type: bool
+ default: true
+ instance_ids:
+ description:
+ - List of instance ids to attach to this ELB.
+ type: list
+ elements: str
+ purge_instance_ids:
+ description:
+ - Purge existing instance ids on ELB that are not found in I(instance_ids).
+ type: bool
+ default: false
+ zones:
+ description:
+ - List of availability zones to enable on this ELB.
+ type: list
+ elements: str
+ purge_zones:
+ description:
+ - Purge existing availability zones on ELB that are not found in zones.
+ type: bool
+ default: false
+ security_group_ids:
+ description:
+ - A list of security groups to apply to the ELB.
+ type: list
+ elements: str
+ security_group_names:
+ description:
+ - A list of security group names to apply to the ELB.
+ type: list
+ elements: str
+ health_check:
+ description:
+ - An associative array of health check configuration settings (see example).
+ type: dict
+ access_logs:
+ description:
+ - An associative array of access logs configuration settings (see example).
+ type: dict
+ subnets:
+ description:
+ - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
+ type: list
+ elements: str
+ purge_subnets:
+ description:
+ - Purge existing subnets on ELB that are not found in subnets.
+ type: bool
+ default: false
+ scheme:
+ description:
+ - The scheme to use when creating the ELB.
+ - For a private VPC-visible ELB use C(internal).
+ - If you choose to update your scheme with a different value the ELB will be destroyed and
+ recreated. To update scheme you must set I(wait=true).
+ choices: ["internal", "internet-facing"]
+ default: 'internet-facing'
+ type: str
+ validate_certs:
+ description:
+ - When set to C(false), SSL certificates will not be validated for boto versions >= 2.6.0.
+ type: bool
+ default: true
+ connection_draining_timeout:
+ description:
+ - Wait a specified timeout allowing connections to drain before terminating an instance.
+ type: int
+ idle_timeout:
+ description:
+ - ELB connections from clients and to servers are timed out after this amount of time.
+ type: int
+ cross_az_load_balancing:
+ description:
+ - Distribute load across all configured Availability Zones.
+ - Defaults to C(false).
+ type: bool
+ stickiness:
+ description:
+ - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example).
+ type: dict
+ wait:
+ description:
+ - When specified, Ansible will check the status of the load balancer to ensure it has been successfully
+ removed from AWS.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated.
+ A maximum of C(600) seconds (10 minutes) is allowed.
+ default: 60
+ type: int
+ tags:
+ description:
+ - An associative array of tags. To delete all tags, supply an empty dict.
+ type: dict
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r"""
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+# Basic provisioning example (non-VPC)
+
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+ proxy_protocol: True
+ - protocol: https
+ load_balancer_port: 443
+ instance_protocol: http # optional, defaults to value of protocol setting
+ instance_port: 80
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
+ delegate_to: localhost
+
+# Internal ELB example
+
+- community.aws.elb_classic_lb:
+ name: "test-vpc"
+ scheme: internal
+ state: present
+ instance_ids:
+ - i-abcd1234
+ purge_instance_ids: true
+ subnets:
+ - subnet-abcd1234
+ - subnet-1a2b3c4d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+ delegate_to: localhost
+
+# Configure a health check and the access logs
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: http # options are http, https, ssl, tcp
+ ping_port: 80
+ ping_path: "/index.html" # not required for tcp or ssl
+ response_timeout: 5 # seconds
+ interval: 30 # seconds
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ access_logs:
+ interval: 5 # minutes (defaults to 60)
+ s3_location: "my-bucket" # This value is required if access_logs is set
+ s3_prefix: "logs"
+ delegate_to: localhost
+
+# Ensure ELB is gone
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+ delegate_to: localhost
+
+# Ensure ELB is gone and wait for check (for default timeout)
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: yes
+ delegate_to: localhost
+
+# Ensure ELB is gone and wait for check with timeout value
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ delegate_to: localhost
+
+# Normally, this module will purge any listeners that exist on the ELB
+# but aren't specified in the listeners parameter. If purge_listeners is
+# false it leaves them alone
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_listeners: no
+ delegate_to: localhost
+
+# Normally, this module will leave availability zones that are enabled
+# on the ELB alone. If purge_zones is true, then any extraneous zones
+# will be removed
+- community.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+ delegate_to: localhost
+
+# Create an ELB and assign a list of subnets to it.
+- community.aws.elb_classic_lb:
+ state: present
+ name: 'New ELB'
+ security_group_ids: 'sg-123456, sg-67890'
+ region: us-west-2
+ subnets: 'subnet-123456,subnet-67890'
+ purge_subnets: yes
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ delegate_to: localhost
+
+# Create an ELB with connection draining, increased idle timeout and cross availability
+# zone load balancing
+- community.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ connection_draining_timeout: 60
+ idle_timeout: 300
+ cross_az_load_balancing: "yes"
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ delegate_to: localhost
+
+# Create an ELB with load balancer stickiness enabled
+- community.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: loadbalancer
+ enabled: yes
+ expiration: 300
+ delegate_to: localhost
+
+# Create an ELB with application stickiness enabled
+- community.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: application
+ enabled: yes
+ cookie: SESSIONID
+ delegate_to: localhost
+
+# Create an ELB and add tags
+- community.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags:
+ Name: "New ELB"
+ stack: "production"
+ client: "Bob"
+ delegate_to: localhost
+
+# Delete all tags from an ELB
+- community.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags: {}
+ delegate_to: localhost
+"""
+
+import random
+import time
+import traceback
+
+try:
+ import boto
+ import boto.ec2.elb
+ import boto.ec2.elb.attributes
+ import boto.vpc
+ from boto.ec2.elb.healthcheck import HealthCheck
+ from boto.ec2.tag import Tag
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+def _throttleable_operation(max_retries):
+ def _operation_wrapper(op):
+ def _do_op(*args, **kwargs):
+ retry = 0
+ while True:
+ try:
+ return op(*args, **kwargs)
+ except boto.exception.BotoServerError as e:
+ if retry < max_retries and e.code in \
+ ("Throttling", "RequestLimitExceeded"):
+ retry = retry + 1
+ time.sleep(min(random.random() * (2 ** retry), 300))
+ continue
+ else:
+ raise
+ return _do_op
+ return _operation_wrapper
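+
+# Usage sketch (illustrative only; mirrors the decorated methods below): a
+# wrapped call retries boto "Throttling"/"RequestLimitExceeded" errors with
+# capped exponential backoff, sleeping random() * 2**retry seconds (max 300):
+#
+#   @_throttleable_operation(_THROTTLING_RETRIES)
+#   def describe_elbs(conn):
+#       return conn.get_all_load_balancers()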
+
+
+def _get_vpc_connection(module, region, aws_connect_params):
+ try:
+ return connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+
+_THROTTLING_RETRIES = 5
+
+
+class ElbManager(object):
+ """Handles ELB creation and destruction"""
+
+ def __init__(self, module, name, listeners=None, purge_listeners=None,
+ zones=None, purge_zones=None, security_group_ids=None,
+ health_check=None, subnets=None, purge_subnets=None,
+ scheme="internet-facing", connection_draining_timeout=None,
+ idle_timeout=None,
+ cross_az_load_balancing=None, access_logs=None,
+ stickiness=None, wait=None, wait_timeout=None, tags=None,
+ region=None,
+ instance_ids=None, purge_instance_ids=None, **aws_connect_params):
+
+ self.module = module
+ self.name = name
+ self.listeners = listeners
+ self.purge_listeners = purge_listeners
+ self.instance_ids = instance_ids
+ self.purge_instance_ids = purge_instance_ids
+ self.zones = zones
+ self.purge_zones = purge_zones
+ self.security_group_ids = security_group_ids
+ self.health_check = health_check
+ self.subnets = subnets
+ self.purge_subnets = purge_subnets
+ self.scheme = scheme
+ self.connection_draining_timeout = connection_draining_timeout
+ self.idle_timeout = idle_timeout
+ self.cross_az_load_balancing = cross_az_load_balancing
+ self.access_logs = access_logs
+ self.stickiness = stickiness
+ self.wait = wait
+ self.wait_timeout = wait_timeout
+ self.tags = tags
+
+ self.aws_connect_params = aws_connect_params
+ self.region = region
+
+ self.changed = False
+ self.status = 'gone'
+ self.elb_conn = self._get_elb_connection()
+
+ try:
+ self.elb = self._get_elb()
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg='unable to get all load balancers: %s' % to_native(e), exception=traceback.format_exc())
+
+ self.ec2_conn = self._get_ec2_connection()
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def ensure_ok(self):
+ """Create the ELB"""
+ if not self.elb:
+ # Zones and listeners will be added at creation
+ self._create_elb()
+ else:
+ if self._get_scheme():
+ # the only way to change the scheme is by recreating the resource
+ self.ensure_gone()
+ self._create_elb()
+ else:
+ self._set_zones()
+ self._set_security_groups()
+ self._set_elb_listeners()
+ self._set_subnets()
+ self._set_health_check()
+ # boto has introduced support for some ELB attributes in
+ # different versions, so we check first before trying to
+ # set them to avoid errors
+ if self._check_attribute_support('connection_draining'):
+ self._set_connection_draining_timeout()
+ if self._check_attribute_support('connecting_settings'):
+ self._set_idle_timeout()
+ if self._check_attribute_support('cross_zone_load_balancing'):
+ self._set_cross_az_load_balancing()
+ if self._check_attribute_support('access_log'):
+ self._set_access_log()
+ # add sticky options
+ self.select_stickiness_policy()
+
+ # ensure backend server policies are correct
+ self._set_backend_policies()
+ # set/remove instance ids
+ self._set_instance_ids()
+
+ self._set_tags()
+
+ def ensure_gone(self):
+ """Destroy the ELB"""
+ if self.elb:
+ self._delete_elb()
+ if self.wait:
+ elb_removed = self._wait_for_elb_removed()
+                # Unfortunately, even though the ELB itself is removed quickly,
+                # its network interfaces take longer, so security groups that
+                # depend on the ELB cannot be deleted until the interfaces have
+                # registered as removed.
+ elb_interface_removed = self._wait_for_elb_interface_removed()
+ if not (elb_removed and elb_interface_removed):
+ self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
+
+ def get_info(self):
+ try:
+ check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
+ except Exception:
+ check_elb = None
+
+ if not check_elb:
+ info = {
+ 'name': self.name,
+ 'status': self.status,
+ 'region': self.region
+ }
+ else:
+ try:
+ lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
+ except Exception:
+ lb_cookie_policy = None
+ try:
+ app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
+ except Exception:
+ app_cookie_policy = None
+
+ info = {
+ 'name': check_elb.name,
+ 'dns_name': check_elb.dns_name,
+ 'zones': check_elb.availability_zones,
+ 'security_group_ids': check_elb.security_groups,
+ 'status': self.status,
+ 'subnets': self.subnets,
+ 'scheme': check_elb.scheme,
+ 'hosted_zone_name': check_elb.canonical_hosted_zone_name,
+ 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
+ 'lb_cookie_policy': lb_cookie_policy,
+ 'app_cookie_policy': app_cookie_policy,
+ 'proxy_policy': self._get_proxy_protocol_policy(),
+ 'backends': self._get_backend_policies(),
+ 'instances': [instance.id for instance in check_elb.instances],
+ 'out_of_service_count': 0,
+ 'in_service_count': 0,
+ 'unknown_instance_state_count': 0,
+ 'region': self.region
+ }
+
+ # status of instances behind the ELB
+ if info['instances']:
+ info['instance_health'] = [dict(
+ instance_id=instance_state.instance_id,
+ reason_code=instance_state.reason_code,
+ state=instance_state.state
+ ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
+ else:
+ info['instance_health'] = []
+
+ # instance state counts: InService or OutOfService
+ if info['instance_health']:
+ for instance_state in info['instance_health']:
+ if instance_state['state'] == "InService":
+ info['in_service_count'] += 1
+ elif instance_state['state'] == "OutOfService":
+ info['out_of_service_count'] += 1
+ else:
+ info['unknown_instance_state_count'] += 1
+
+ if check_elb.health_check:
+ info['health_check'] = {
+ 'target': check_elb.health_check.target,
+ 'interval': check_elb.health_check.interval,
+ 'timeout': check_elb.health_check.timeout,
+ 'healthy_threshold': check_elb.health_check.healthy_threshold,
+ 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
+ }
+
+ if check_elb.listeners:
+ info['listeners'] = [self._api_listener_as_tuple(l)
+ for l in check_elb.listeners]
+ elif self.status == 'created':
+ # When creating a new ELB, listeners don't show in the
+ # immediately returned result, so just include the
+ # ones that were added
+ info['listeners'] = [self._listener_as_tuple(l)
+ for l in self.listeners]
+ else:
+ info['listeners'] = []
+
+ if self._check_attribute_support('connection_draining'):
+ info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
+
+ if self._check_attribute_support('connecting_settings'):
+ info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
+
+ if self._check_attribute_support('cross_zone_load_balancing'):
+ is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
+ if is_cross_az_lb_enabled:
+ info['cross_az_load_balancing'] = 'yes'
+ else:
+ info['cross_az_load_balancing'] = 'no'
+
+ # return stickiness info?
+
+ info['tags'] = self.tags
+
+ return info
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _wait_for_elb_removed(self):
+ polling_increment_secs = 15
+ max_retries = (self.wait_timeout // polling_increment_secs)
+ status_achieved = False
+
+        for x in range(0, max_retries):
+            try:
+                self.elb_conn.get_all_lb_attributes(self.name)
+            except boto.exception.BotoServerError as e:
+                if "LoadBalancerNotFound" in e.code:
+                    status_achieved = True
+                    break
+                time.sleep(polling_increment_secs)
+            else:
+                # ELB still present; wait before polling again
+                time.sleep(polling_increment_secs)
+
+ return status_achieved
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _wait_for_elb_interface_removed(self):
+ polling_increment_secs = 15
+ max_retries = (self.wait_timeout // polling_increment_secs)
+ status_achieved = False
+
+ elb_interfaces = self.ec2_conn.get_all_network_interfaces(
+ filters={'attachment.instance-owner-id': 'amazon-elb',
+ 'description': 'ELB {0}'.format(self.name)})
+
+        for x in range(0, max_retries):
+            for interface in elb_interfaces:
+                try:
+                    result = self.ec2_conn.get_all_network_interfaces(interface.id)
+                    if not result:
+                        status_achieved = True
+                        break
+                    time.sleep(polling_increment_secs)
+                except boto.exception.BotoServerError as e:
+                    if 'InvalidNetworkInterfaceID' in e.code:
+                        status_achieved = True
+                        break
+                    self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+            if status_achieved:
+                break
+
+ return status_achieved
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _get_elb(self):
+ elbs = self.elb_conn.get_all_load_balancers()
+ for elb in elbs:
+ if self.name == elb.name:
+ self.status = 'ok'
+ return elb
+
+ def _get_elb_connection(self):
+ try:
+ return connect_to_aws(boto.ec2.elb, self.region,
+ **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+
+ def _get_ec2_connection(self):
+ try:
+ return connect_to_aws(boto.ec2, self.region,
+ **self.aws_connect_params)
+        except Exception as e:  # also covers boto.exception.NoAuthHandlerFound
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _delete_elb(self):
+ # True if succeeds, exception raised if not
+ result = self.elb_conn.delete_load_balancer(name=self.name)
+ if result:
+ self.changed = True
+ self.status = 'deleted'
+
+ def _create_elb(self):
+ listeners = [self._listener_as_tuple(l) for l in self.listeners]
+ self.elb = self.elb_conn.create_load_balancer(name=self.name,
+ zones=self.zones,
+ security_groups=self.security_group_ids,
+ complex_listeners=listeners,
+ subnets=self.subnets,
+ scheme=self.scheme)
+ if self.elb:
+ # HACK: Work around a boto bug in which the listeners attribute is
+ # always set to the listeners argument to create_load_balancer, and
+ # not the complex_listeners
+ # We're not doing a self.elb = self._get_elb here because there
+ # might be eventual consistency issues and it doesn't necessarily
+ # make sense to wait until the ELB gets returned from the EC2 API.
+ # This is necessary in the event we hit the throttling errors and
+ # need to retry ensure_ok
+ # See https://github.com/boto/boto/issues/3526
+ self.elb.listeners = self.listeners
+ self.changed = True
+ self.status = 'created'
+
+ def _create_elb_listeners(self, listeners):
+ """Takes a list of listener tuples and creates them"""
+ # True if succeeds, exception raised if not
+ self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
+ complex_listeners=listeners)
+
+ def _delete_elb_listeners(self, listeners):
+ """Takes a list of listener tuples and deletes them from the elb"""
+ ports = [l[0] for l in listeners]
+
+ # True if succeeds, exception raised if not
+ self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
+ ports)
+
+ def _set_elb_listeners(self):
+ """
+ Creates listeners specified by self.listeners; overwrites existing
+ listeners on these ports; removes extraneous listeners
+ """
+ listeners_to_add = []
+ listeners_to_remove = []
+ listeners_to_keep = []
+
+ # Check for any listeners we need to create or overwrite
+ for listener in self.listeners:
+ listener_as_tuple = self._listener_as_tuple(listener)
+
+ # First we loop through existing listeners to see if one is
+ # already specified for this port
+ existing_listener_found = None
+ for existing_listener in self.elb.listeners:
+ # Since ELB allows only one listener on each incoming port, a
+ # single match on the incoming port is all we're looking for
+ if existing_listener[0] == int(listener['load_balancer_port']):
+ existing_listener_found = self._api_listener_as_tuple(existing_listener)
+ break
+
+ if existing_listener_found:
+ # Does it match exactly?
+ if listener_as_tuple != existing_listener_found:
+ # The ports are the same but something else is different,
+ # so we'll remove the existing one and add the new one
+ listeners_to_remove.append(existing_listener_found)
+ listeners_to_add.append(listener_as_tuple)
+ else:
+ # We already have this listener, so we're going to keep it
+ listeners_to_keep.append(existing_listener_found)
+ else:
+ # We didn't find an existing listener, so just add the new one
+ listeners_to_add.append(listener_as_tuple)
+
+ # Check for any extraneous listeners we need to remove, if desired
+ if self.purge_listeners:
+ for existing_listener in self.elb.listeners:
+ existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
+ if existing_listener_tuple in listeners_to_remove:
+ # Already queued for removal
+ continue
+ if existing_listener_tuple in listeners_to_keep:
+ # Keep this one around
+ continue
+ # Since we're not already removing it and we don't need to keep
+ # it, let's get rid of it
+ listeners_to_remove.append(existing_listener_tuple)
+
+ if listeners_to_remove:
+ self._delete_elb_listeners(listeners_to_remove)
+
+ if listeners_to_add:
+ self._create_elb_listeners(listeners_to_add)
+
+ def _api_listener_as_tuple(self, listener):
+ """Adds ssl_certificate_id to ELB API tuple if present"""
+ base_tuple = listener.get_complex_tuple()
+ if listener.ssl_certificate_id and len(base_tuple) < 5:
+ return base_tuple + (listener.ssl_certificate_id,)
+ return base_tuple
+
+ def _listener_as_tuple(self, listener):
+ """Formats listener as a 4- or 5-tuples, in the order specified by the
+ ELB API"""
+ # N.B. string manipulations on protocols below (str(), upper()) is to
+ # ensure format matches output from ELB API
+ listener_list = [
+ int(listener['load_balancer_port']),
+ int(listener['instance_port']),
+ str(listener['protocol'].upper()),
+ ]
+
+ # Instance protocol is not required by ELB API; it defaults to match
+ # load balancer protocol. We'll mimic that behavior here
+ if 'instance_protocol' in listener:
+ listener_list.append(str(listener['instance_protocol'].upper()))
+ else:
+ listener_list.append(str(listener['protocol'].upper()))
+
+ if 'ssl_certificate_id' in listener:
+ listener_list.append(str(listener['ssl_certificate_id']))
+
+ return tuple(listener_list)
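+
+    # Illustrative example (assumed values, not executed): a listener dict like
+    #   {'load_balancer_port': 443, 'instance_port': 80, 'protocol': 'https',
+    #    'ssl_certificate_id': 'arn:aws:iam::123456789012:server-certificate/x'}
+    # becomes the 5-tuple (443, 80, 'HTTPS', 'HTTPS', 'arn:...'), since
+    # instance_protocol defaults to the listener protocol.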
+
+ def _enable_zones(self, zones):
+ try:
+ self.elb.enable_zones(zones)
+ except boto.exception.BotoServerError as e:
+ self.module.fail_json(msg='unable to enable zones: %s' % to_native(e), exception=traceback.format_exc())
+
+ self.changed = True
+
+ def _disable_zones(self, zones):
+ try:
+ self.elb.disable_zones(zones)
+ except boto.exception.BotoServerError as e:
+ self.module.fail_json(msg='unable to disable zones: %s' % to_native(e), exception=traceback.format_exc())
+ self.changed = True
+
+ def _attach_subnets(self, subnets):
+ self.elb_conn.attach_lb_to_subnets(self.name, subnets)
+ self.changed = True
+
+ def _detach_subnets(self, subnets):
+ self.elb_conn.detach_lb_from_subnets(self.name, subnets)
+ self.changed = True
+
+ def _set_subnets(self):
+ """Determine which subnets need to be attached or detached on the ELB"""
+ if self.subnets:
+ if self.purge_subnets:
+ subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
+ subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+ else:
+ subnets_to_detach = None
+ subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+
+ if subnets_to_attach:
+ self._attach_subnets(subnets_to_attach)
+ if subnets_to_detach:
+ self._detach_subnets(subnets_to_detach)
+
+ def _get_scheme(self):
+ """Determine if the current scheme is different than the scheme of the ELB"""
+ if self.scheme:
+ if self.elb.scheme != self.scheme:
+ if not self.wait:
+ self.module.fail_json(msg="Unable to modify scheme without using the wait option")
+ return True
+ return False
+
+ def _set_zones(self):
+ """Determine which zones need to be enabled or disabled on the ELB"""
+ if self.zones:
+ if self.purge_zones:
+ zones_to_disable = list(set(self.elb.availability_zones) -
+ set(self.zones))
+ zones_to_enable = list(set(self.zones) -
+ set(self.elb.availability_zones))
+ else:
+ zones_to_disable = None
+ zones_to_enable = list(set(self.zones) -
+ set(self.elb.availability_zones))
+ if zones_to_enable:
+ self._enable_zones(zones_to_enable)
+ # N.B. This must come second, in case it would have removed all zones
+ if zones_to_disable:
+ self._disable_zones(zones_to_disable)
+
+ def _set_security_groups(self):
+ if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
+ self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
+ self.changed = True
+
+ def _set_health_check(self):
+ """Set health check values on ELB as needed"""
+ if self.health_check:
+ # This just makes it easier to compare each of the attributes
+ # and look for changes. Keys are attributes of the current
+ # health_check; values are desired values of new health_check
+ health_check_config = {
+ "target": self._get_health_check_target(),
+ "timeout": self.health_check['response_timeout'],
+ "interval": self.health_check['interval'],
+ "unhealthy_threshold": self.health_check['unhealthy_threshold'],
+ "healthy_threshold": self.health_check['healthy_threshold'],
+ }
+
+ update_health_check = False
+
+ # The health_check attribute is *not* set on newly created
+ # ELBs! So we have to create our own.
+ if not self.elb.health_check:
+ self.elb.health_check = HealthCheck()
+
+ for attr, desired_value in health_check_config.items():
+ if getattr(self.elb.health_check, attr) != desired_value:
+ setattr(self.elb.health_check, attr, desired_value)
+ update_health_check = True
+
+ if update_health_check:
+ self.elb.configure_health_check(self.elb.health_check)
+ self.changed = True
+
+ def _check_attribute_support(self, attr):
+ return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
+
+ def _set_cross_az_load_balancing(self):
+ attributes = self.elb.get_attributes()
+ if self.cross_az_load_balancing:
+ if not attributes.cross_zone_load_balancing.enabled:
+ self.changed = True
+ attributes.cross_zone_load_balancing.enabled = True
+ else:
+ if attributes.cross_zone_load_balancing.enabled:
+ self.changed = True
+ attributes.cross_zone_load_balancing.enabled = False
+ self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
+ attributes.cross_zone_load_balancing.enabled)
+
+ def _set_access_log(self):
+ attributes = self.elb.get_attributes()
+ if self.access_logs:
+ if 's3_location' not in self.access_logs:
+ self.module.fail_json(msg='s3_location information required')
+
+ access_logs_config = {
+ "enabled": True,
+ "s3_bucket_name": self.access_logs['s3_location'],
+ "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
+ "emit_interval": self.access_logs.get('interval', 60),
+ }
+
+ update_access_logs_config = False
+ for attr, desired_value in access_logs_config.items():
+ if getattr(attributes.access_log, attr) != desired_value:
+ setattr(attributes.access_log, attr, desired_value)
+ update_access_logs_config = True
+ if update_access_logs_config:
+ self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
+ self.changed = True
+ elif attributes.access_log.enabled:
+ attributes.access_log.enabled = False
+ self.changed = True
+ self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
+
+ def _set_connection_draining_timeout(self):
+ attributes = self.elb.get_attributes()
+ if self.connection_draining_timeout is not None:
+ if not attributes.connection_draining.enabled or \
+ attributes.connection_draining.timeout != self.connection_draining_timeout:
+ self.changed = True
+ attributes.connection_draining.enabled = True
+ attributes.connection_draining.timeout = self.connection_draining_timeout
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
+ else:
+ if attributes.connection_draining.enabled:
+ self.changed = True
+ attributes.connection_draining.enabled = False
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
+
+ def _set_idle_timeout(self):
+ attributes = self.elb.get_attributes()
+ if self.idle_timeout is not None:
+ if attributes.connecting_settings.idle_timeout != self.idle_timeout:
+ self.changed = True
+ attributes.connecting_settings.idle_timeout = self.idle_timeout
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
+
+ def _policy_name(self, policy_type):
+ return 'elb-classic-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict'))
+
+ def _create_policy(self, policy_param, policy_meth, policy):
+ getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
+
+ def _delete_policy(self, elb_name, policy):
+ self.elb_conn.delete_lb_policy(elb_name, policy)
+
+ def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
+ self._delete_policy(self.elb.name, policy)
+ self._create_policy(policy_param, policy_meth, policy)
+
+ def _set_listener_policy(self, listeners_dict, policy=None):
+ policy = [] if policy is None else policy
+
+ for listener_port in listeners_dict:
+ if listeners_dict[listener_port].startswith('HTTP'):
+ self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
+
+ def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
+ for p in getattr(elb_info.policies, policy_attrs['attr']):
+ if str(p.__dict__['policy_name']) == str(policy[0]):
+ if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
+ self._set_listener_policy(listeners_dict)
+ self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
+ self.changed = True
+ break
+ else:
+ self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
+ self.changed = True
+
+ self._set_listener_policy(listeners_dict, policy)
+
+ def select_stickiness_policy(self):
+ if self.stickiness:
+
+ if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
+                self.module.fail_json(msg="'cookie' and 'expiration' cannot be set at the same time")
+
+ elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
+ d = {}
+ for listener in elb_info.listeners:
+ d[listener[0]] = listener[2]
+ listeners_dict = d
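+            # listeners_dict now maps load balancer port to protocol, e.g.
+            # {80: 'HTTP', 443: 'HTTPS'} (illustrative values)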
+
+ if self.stickiness['type'] == 'loadbalancer':
+ policy = []
+ policy_type = 'LBCookieStickinessPolicyType'
+
+ if self.module.boolean(self.stickiness['enabled']):
+
+ if 'expiration' not in self.stickiness:
+ self.module.fail_json(msg='expiration must be set when type is loadbalancer')
+
+ try:
+ expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
+ except ValueError:
+ self.module.fail_json(msg='expiration must be set to an integer')
+
+ policy_attrs = {
+ 'type': policy_type,
+ 'attr': 'lb_cookie_stickiness_policies',
+ 'method': 'create_lb_cookie_stickiness_policy',
+ 'dict_key': 'cookie_expiration_period',
+ 'param_value': expiration
+ }
+ policy.append(self._policy_name(policy_attrs['type']))
+
+ self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
+ elif not self.module.boolean(self.stickiness['enabled']):
+ if len(elb_info.policies.lb_cookie_stickiness_policies):
+ if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
+ self.changed = True
+ else:
+ self.changed = False
+ self._set_listener_policy(listeners_dict)
+ self._delete_policy(self.elb.name, self._policy_name(policy_type))
+
+ elif self.stickiness['type'] == 'application':
+ policy = []
+ policy_type = 'AppCookieStickinessPolicyType'
+ if self.module.boolean(self.stickiness['enabled']):
+
+ if 'cookie' not in self.stickiness:
+ self.module.fail_json(msg='cookie must be set when type is application')
+
+ policy_attrs = {
+ 'type': policy_type,
+ 'attr': 'app_cookie_stickiness_policies',
+ 'method': 'create_app_cookie_stickiness_policy',
+ 'dict_key': 'cookie_name',
+ 'param_value': self.stickiness['cookie']
+ }
+ policy.append(self._policy_name(policy_attrs['type']))
+ self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
+ elif not self.module.boolean(self.stickiness['enabled']):
+ if len(elb_info.policies.app_cookie_stickiness_policies):
+ if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
+ self.changed = True
+ self._set_listener_policy(listeners_dict)
+ self._delete_policy(self.elb.name, self._policy_name(policy_type))
+
+ else:
+ self._set_listener_policy(listeners_dict)
+
+ def _get_backend_policies(self):
+ """Get a list of backend policies"""
+ policies = []
+ if self.elb.backends is not None:
+ for backend in self.elb.backends:
+ if backend.policies is not None:
+ for policy in backend.policies:
+ policies.append(str(backend.instance_port) + ':' + policy.policy_name)
+
+ return policies
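+
+    # Illustrative example (assumed values): with ProxyProtocol enabled on
+    # instance port 80 this returns ['80:ProxyProtocol-policy'].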
+
+ def _set_backend_policies(self):
+ """Sets policies for all backends"""
+ ensure_proxy_protocol = False
+ replace = []
+ backend_policies = self._get_backend_policies()
+
+ # Find out what needs to be changed
+ for listener in self.listeners:
+ want = False
+
+ if 'proxy_protocol' in listener and listener['proxy_protocol']:
+ ensure_proxy_protocol = True
+ want = True
+
+ if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
+ if not want:
+ replace.append({'port': listener['instance_port'], 'policies': []})
+ elif want:
+ replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
+
+ # enable or disable proxy protocol
+ if ensure_proxy_protocol:
+ self._set_proxy_protocol_policy()
+
+ # Make the backend policies so
+ for item in replace:
+ self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
+ self.changed = True
+
+ def _get_proxy_protocol_policy(self):
+ """Find out if the elb has a proxy protocol enabled"""
+ if self.elb.policies is not None and self.elb.policies.other_policies is not None:
+ for policy in self.elb.policies.other_policies:
+ if policy.policy_name == 'ProxyProtocol-policy':
+ return policy.policy_name
+
+ return None
+
+ def _set_proxy_protocol_policy(self):
+ """Install a proxy protocol policy if needed"""
+ proxy_policy = self._get_proxy_protocol_policy()
+
+ if proxy_policy is None:
+ self.elb_conn.create_lb_policy(
+ self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
+ )
+ self.changed = True
+
+ # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
+
+ def _diff_list(self, a, b):
+ """Find the entries in list a that are not in list b"""
+ b = set(b)
+ return [aa for aa in a if aa not in b]
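+
+    # Illustrative example: _diff_list(['i-1', 'i-2'], ['i-2']) -> ['i-1']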
+
+ def _get_instance_ids(self):
+ """Get the current list of instance ids installed in the elb"""
+ instances = []
+ if self.elb.instances is not None:
+ for instance in self.elb.instances:
+ instances.append(instance.id)
+
+ return instances
+
+ def _set_instance_ids(self):
+ """Register or deregister instances from an lb instance"""
+ assert_instances = self.instance_ids or []
+
+ has_instances = self._get_instance_ids()
+
+ add_instances = self._diff_list(assert_instances, has_instances)
+ if add_instances:
+ self.elb_conn.register_instances(self.elb.name, add_instances)
+ self.changed = True
+
+ if self.purge_instance_ids:
+ remove_instances = self._diff_list(has_instances, assert_instances)
+ if remove_instances:
+ self.elb_conn.deregister_instances(self.elb.name, remove_instances)
+ self.changed = True
+
+ def _set_tags(self):
+ """Add/Delete tags"""
+ if self.tags is None:
+ return
+
+ params = {'LoadBalancerNames.member.1': self.name}
+
+ tagdict = dict()
+
+ # get the current list of tags from the ELB, if ELB exists
+ if self.elb:
+ current_tags = self.elb_conn.get_list('DescribeTags', params,
+ [('member', Tag)])
+ tagdict = dict((tag.Key, tag.Value) for tag in current_tags
+ if hasattr(tag, 'Key'))
+
+ # Add missing tags
+ dictact = dict(set(self.tags.items()) - set(tagdict.items()))
+ if dictact:
+ for i, key in enumerate(dictact):
+ params['Tags.member.%d.Key' % (i + 1)] = key
+ params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
+
+ self.elb_conn.make_request('AddTags', params)
+ self.changed = True
+
+ # Remove extra tags
+ dictact = dict(set(tagdict.items()) - set(self.tags.items()))
+ if dictact:
+ for i, key in enumerate(dictact):
+ params['Tags.member.%d.Key' % (i + 1)] = key
+
+ self.elb_conn.make_request('RemoveTags', params)
+ self.changed = True
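+
+    # Illustrative note (assumed values, not executed): the query-API params
+    # built above look like
+    #   {'LoadBalancerNames.member.1': 'my-elb',
+    #    'Tags.member.1.Key': 'stack', 'Tags.member.1.Value': 'production'}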
+
+ def _get_health_check_target(self):
+ """Compose target string from healthcheck parameters"""
+ protocol = self.health_check['ping_protocol'].upper()
+ path = ""
+
+ if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+ path = self.health_check['ping_path']
+
+ return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+
+def main():
+ argument_spec = dict(
+ state={'required': True, 'choices': ['present', 'absent']},
+ name={'required': True},
+ listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'},
+ purge_listeners={'default': True, 'required': False, 'type': 'bool'},
+ instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
+ zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_zones={'default': False, 'required': False, 'type': 'bool'},
+ security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ health_check={'default': None, 'required': False, 'type': 'dict'},
+ subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_subnets={'default': False, 'required': False, 'type': 'bool'},
+ scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
+ connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
+ idle_timeout={'default': None, 'type': 'int', 'required': False},
+ cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
+ stickiness={'default': None, 'required': False, 'type': 'dict'},
+ access_logs={'default': None, 'required': False, 'type': 'dict'},
+ wait={'default': False, 'type': 'bool', 'required': False},
+ wait_timeout={'default': 60, 'type': 'int', 'required': False},
+ tags={'default': None, 'required': False, 'type': 'dict'},
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['security_group_ids', 'security_group_names']],
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ name = module.params['name']
+ state = module.params['state']
+ listeners = module.params['listeners']
+ purge_listeners = module.params['purge_listeners']
+ instance_ids = module.params['instance_ids']
+ purge_instance_ids = module.params['purge_instance_ids']
+ zones = module.params['zones']
+ purge_zones = module.params['purge_zones']
+ security_group_ids = module.params['security_group_ids']
+ security_group_names = module.params['security_group_names']
+ health_check = module.params['health_check']
+ access_logs = module.params['access_logs']
+ subnets = module.params['subnets']
+ purge_subnets = module.params['purge_subnets']
+ scheme = module.params['scheme']
+ connection_draining_timeout = module.params['connection_draining_timeout']
+ idle_timeout = module.params['idle_timeout']
+ cross_az_load_balancing = module.params['cross_az_load_balancing']
+ stickiness = module.params['stickiness']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout']
+ tags = module.params['tags']
+
+ if state == 'present' and not listeners:
+ module.fail_json(msg="At least one listener is required for ELB creation")
+
+ if state == 'present' and not (zones or subnets):
+ module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
+
+ if wait_timeout > 600:
+ module.fail_json(msg='wait_timeout maximum is 600 seconds')
+
+ if security_group_names:
+ security_group_ids = []
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ if subnets: # We have at least one subnet, ergo this is a VPC
+ vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
+ vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
+ filters = {'vpc_id': vpc_id}
+ else:
+ filters = None
+ grp_details = ec2.get_all_security_groups(filters=filters)
+
+ for group_name in security_group_names:
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ security_group_ids.extend(group_id)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
+ purge_zones, security_group_ids, health_check,
+ subnets, purge_subnets, scheme,
+ connection_draining_timeout, idle_timeout,
+ cross_az_load_balancing,
+ access_logs, stickiness, wait, wait_timeout, tags,
+ region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
+ **aws_connect_params)
+
+ # check for unsupported attributes for this version of boto
+ if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
+ module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
+
+ if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
+ module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
+
+ if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
+ module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
+
+ if state == 'present':
+ elb_man.ensure_ok()
+ elif state == 'absent':
+ elb_man.ensure_gone()
+
+ ansible_facts = {'ec2_elb': 'info'}
+ ec2_facts_result = dict(changed=elb_man.changed,
+ elb=elb_man.get_info(),
+ ansible_facts=ansible_facts)
+
+ module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_facts.py
new file mode 100644
index 00000000..12a6a437
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_facts.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_classic_lb_info
+version_added: 1.0.0
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather information about EC2 Elastic Load Balancers in AWS
+ - This module was called C(elb_classic_lb_facts) before Ansible 2.9. The usage did not change.
+author:
+ - "Michael Schultz (@mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+      - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs; otherwise, all ELBs are returned.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - botocore
+ - boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
+
+# Gather information about all ELBs
+- community.aws.elb_classic_lb_info:
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+# Gather information about a particular ELB
+- community.aws.elb_classic_lb_info:
+ names: frontend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ elb_info.elbs.0.dns_name }}"
+
+# Gather information about a set of ELBs
+- community.aws.elb_classic_lb_info:
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
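+# Inspect a single attribute of an ELB (a usage sketch based on the RETURN
+# structure below; attribute keys are snake_cased by this module)
+- community.aws.elb_classic_lb_info:
+    names: frontend-prod-elb
+  register: elb_info
+
+- ansible.builtin.debug:
+    msg: "{{ elb_info.elbs.0.attributes.connection_settings.idle_timeout }}"
+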
+'''
+
+RETURN = r'''
+elbs:
+ description: a list of load balancers
+ returned: always
+ type: list
+ sample:
+ elbs:
+ - attributes:
+ access_log:
+ enabled: false
+ connection_draining:
+ enabled: true
+ timeout: 300
+ connection_settings:
+ idle_timeout: 60
+ cross_zone_load_balancing:
+ enabled: true
+ availability_zones:
+ - "us-east-1a"
+ - "us-east-1b"
+ - "us-east-1c"
+ - "us-east-1d"
+ - "us-east-1e"
+ backend_server_description: []
+ canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ canonical_hosted_zone_name_id: XXXXXXXXXXXXXX
+ created_time: '2017-08-23T18:25:03.280000+00:00'
+ dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ health_check:
+ healthy_threshold: 10
+ interval: 30
+ target: HTTP:80/index.html
+ timeout: 5
+ unhealthy_threshold: 2
+ instances: []
+ instances_inservice: []
+ instances_inservice_count: 0
+ instances_outofservice: []
+ instances_outofservice_count: 0
+ instances_unknownservice: []
+ instances_unknownservice_count: 0
+ listener_descriptions:
+ - listener:
+ instance_port: 80
+ instance_protocol: HTTP
+ load_balancer_port: 80
+ protocol: HTTP
+ policy_names: []
+ load_balancer_name: test-lb
+ policies:
+ app_cookie_stickiness_policies: []
+ lb_cookie_stickiness_policies: []
+ other_policies: []
+ scheme: internet-facing
+ security_groups:
+ - sg-29d13055
+ source_security_group:
+ group_name: default
+ owner_alias: XXXXXXXXXXXX
+ subnets:
+ - subnet-XXXXXXXX
+ - subnet-XXXXXXXX
+ tags: {}
+ vpc_id: vpc-c248fda4
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_elbs(connection, names):
+ paginator = connection.get_paginator('describe_load_balancers')
+ load_balancers = paginator.paginate(LoadBalancerNames=names).build_full_result().get('LoadBalancerDescriptions', [])
+ results = []
+
+ for lb in load_balancers:
+ description = camel_dict_to_snake_dict(lb)
+ name = lb['LoadBalancerName']
+ instances = lb.get('Instances', [])
+ description['tags'] = get_tags(connection, name)
+ description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService')
+ description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService')
+ description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown')
+ description['attributes'] = get_lb_attributes(connection, name)
+ results.append(description)
+ return results
+
+
+def get_lb_attributes(connection, name):
+ attributes = connection.describe_load_balancer_attributes(LoadBalancerName=name).get('LoadBalancerAttributes', {})
+ return camel_dict_to_snake_dict(attributes)
+
+
+def get_tags(connection, load_balancer_name):
+ tags = connection.describe_tags(LoadBalancerNames=[load_balancer_name])['TagDescriptions']
+ if not tags:
+ return {}
+ return boto3_tag_list_to_ansible_dict(tags[0]['Tags'])
+
+
+def lb_instance_health(connection, load_balancer_name, instances, state):
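+    # Return the instance IDs from the health descriptions whose state matches
+    # the requested one (e.g. 'InService'), together with their count.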
+ instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', [])
+ instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state]
+ return instate, len(instate)
+
+
+def main():
+ argument_spec = dict(
+ names={'default': [], 'type': 'list', 'elements': 'str'}
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'elb_classic_lb_facts':
+ module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('elb')
+
+ try:
+ elbs = list_elbs(connection, module.params.get('names'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get load balancer information.")
+
+ module.exit_json(elbs=elbs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
new file mode 100644
index 00000000..12a6a437
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_classic_lb_info
+version_added: 1.0.0
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather information about EC2 Elastic Load Balancers in AWS
+ - This module was called C(elb_classic_lb_facts) before Ansible 2.9. The usage did not change.
+author:
+ - "Michael Schultz (@mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+      - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs; otherwise, all ELBs are returned.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - botocore
+ - boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
+
+# Gather information about all ELBs
+- community.aws.elb_classic_lb_info:
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+# Gather information about a particular ELB
+- community.aws.elb_classic_lb_info:
+ names: frontend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ elb_info.elbs.0.dns_name }}"
+
+# Gather information about a set of ELBs
+- community.aws.elb_classic_lb_info:
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
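+# Inspect a single attribute of an ELB (a usage sketch based on the RETURN
+# structure below; attribute keys are snake_cased by this module)
+- community.aws.elb_classic_lb_info:
+    names: frontend-prod-elb
+  register: elb_info
+
+- ansible.builtin.debug:
+    msg: "{{ elb_info.elbs.0.attributes.connection_settings.idle_timeout }}"
+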
+'''
+
+RETURN = r'''
+elbs:
+ description: a list of load balancers
+ returned: always
+ type: list
+ sample:
+ elbs:
+ - attributes:
+ access_log:
+ enabled: false
+ connection_draining:
+ enabled: true
+ timeout: 300
+ connection_settings:
+ idle_timeout: 60
+ cross_zone_load_balancing:
+ enabled: true
+ availability_zones:
+ - "us-east-1a"
+ - "us-east-1b"
+ - "us-east-1c"
+ - "us-east-1d"
+ - "us-east-1e"
+ backend_server_description: []
+ canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ canonical_hosted_zone_name_id: XXXXXXXXXXXXXX
+ created_time: '2017-08-23T18:25:03.280000+00:00'
+ dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ health_check:
+ healthy_threshold: 10
+ interval: 30
+ target: HTTP:80/index.html
+ timeout: 5
+ unhealthy_threshold: 2
+ instances: []
+ instances_inservice: []
+ instances_inservice_count: 0
+ instances_outofservice: []
+ instances_outofservice_count: 0
+ instances_unknownservice: []
+ instances_unknownservice_count: 0
+ listener_descriptions:
+ - listener:
+ instance_port: 80
+ instance_protocol: HTTP
+ load_balancer_port: 80
+ protocol: HTTP
+ policy_names: []
+ load_balancer_name: test-lb
+ policies:
+ app_cookie_stickiness_policies: []
+ lb_cookie_stickiness_policies: []
+ other_policies: []
+ scheme: internet-facing
+ security_groups:
+ - sg-29d13055
+ source_security_group:
+ group_name: default
+ owner_alias: XXXXXXXXXXXX
+ subnets:
+ - subnet-XXXXXXXX
+ - subnet-XXXXXXXX
+ tags: {}
+ vpc_id: vpc-c248fda4
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_elbs(connection, names):
+ paginator = connection.get_paginator('describe_load_balancers')
+ load_balancers = paginator.paginate(LoadBalancerNames=names).build_full_result().get('LoadBalancerDescriptions', [])
+ results = []
+
+ for lb in load_balancers:
+ description = camel_dict_to_snake_dict(lb)
+ name = lb['LoadBalancerName']
+ instances = lb.get('Instances', [])
+ description['tags'] = get_tags(connection, name)
+ description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService')
+ description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService')
+ description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown')
+ description['attributes'] = get_lb_attributes(connection, name)
+ results.append(description)
+ return results
+
+
+def get_lb_attributes(connection, name):
+ attributes = connection.describe_load_balancer_attributes(LoadBalancerName=name).get('LoadBalancerAttributes', {})
+ return camel_dict_to_snake_dict(attributes)
+
+
+def get_tags(connection, load_balancer_name):
+ tags = connection.describe_tags(LoadBalancerNames=[load_balancer_name])['TagDescriptions']
+ if not tags:
+ return {}
+ return boto3_tag_list_to_ansible_dict(tags[0]['Tags'])
+
+
+def lb_instance_health(connection, load_balancer_name, instances, state):
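+    # Return the instance IDs from the health descriptions whose state matches
+    # the requested one (e.g. 'InService'), together with their count.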
+ instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', [])
+ instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state]
+ return instate, len(instate)
+
+
+def main():
+ argument_spec = dict(
+ names={'default': [], 'type': 'list', 'elements': 'str'}
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ if module._name == 'elb_classic_lb_facts':
+ module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", date='2021-12-01', collection_name='community.aws')
+
+ connection = module.client('elb')
+
+ try:
+ elbs = list_elbs(connection, module.params.get('names'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get load balancer information.")
+
+ module.exit_json(elbs=elbs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_instance.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_instance.py
new file mode 100644
index 00000000..fe10d6cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_instance.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_instance
+version_added: 1.0.0
+short_description: De-registers or registers instances from EC2 ELBs
+description:
+ - This module de-registers or registers an AWS EC2 instance from the ELBs
+ that it belongs to.
+  - Returns fact "ec2_elbs" which is a list of ELBs attached to the instance
+ if state=absent is passed as an argument.
+ - Will be marked changed when called only if there are ELBs found to operate on.
+author: "John Jarvis (@jarv)"
+options:
+ state:
+ description:
+      - Register or deregister the instance.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ instance_id:
+ description:
+ - EC2 Instance ID
+ required: true
+ type: str
+ ec2_elbs:
+ description:
+ - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
+ type: list
+ elements: str
+ enable_availability_zone:
+ description:
+ - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
+ been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
+ type: bool
+ default: 'yes'
+ wait:
+ description:
+ - Wait for instance registration or deregistration to complete successfully before returning.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
+ If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
+ default: 0
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r"""
+# basic pre_task and post_task example
+pre_tasks:
+ - name: Instance De-register
+ community.aws.elb_instance:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ state: absent
+ delegate_to: localhost
+roles:
+ - myrole
+post_tasks:
+ - name: Instance Register
+ community.aws.elb_instance:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ ec2_elbs: "{{ item }}"
+ state: present
+ delegate_to: localhost
+ loop: "{{ ec2_elbs }}"
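+
+# a registration sketch with a bounded wait; transient errors are retried
+# until wait_timeout (seconds) elapses, and wait_timeout is ignored when wait=no
+tasks:
+  - name: Instance Register with a bounded wait
+    community.aws.elb_instance:
+      instance_id: "{{ ansible_ec2_instance_id }}"
+      ec2_elbs: frontend-prod-elb
+      state: present
+      wait: yes
+      wait_timeout: 300
+    delegate_to: localhost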
+"""
+
+import time
+
+try:
+ import boto
+ import boto.ec2
+ import boto.ec2.autoscale
+ import boto.ec2.elb
+ from boto.regioninfo import RegionInfo
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+class ElbManager:
+ """Handles EC2 instance ELB registration and de-registration"""
+
+ def __init__(self, module, instance_id=None, ec2_elbs=None,
+ region=None, **aws_connect_params):
+ self.module = module
+ self.instance_id = instance_id
+ self.region = region
+ self.aws_connect_params = aws_connect_params
+ self.lbs = self._get_instance_lbs(ec2_elbs)
+ self.changed = False
+
+ def deregister(self, wait, timeout):
+ """De-register the instance from all ELBs and wait for the ELB
+ to report it out-of-service"""
+
+ for lb in self.lbs:
+ initial_state = self._get_instance_health(lb)
+ if initial_state is None:
+ # Instance isn't registered with this load
+ # balancer. Ignore it and try the next one.
+ continue
+
+ lb.deregister_instances([self.instance_id])
+
+ # The ELB is changing state in some way. Either an instance that's
+ # InService is moving to OutOfService, or an instance that's
+ # already OutOfService is being deregistered.
+ self.changed = True
+
+ if wait:
+ self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
+
+ def register(self, wait, enable_availability_zone, timeout):
+ """Register the instance for all ELBs and wait for the ELB
+ to report the instance in-service"""
+ for lb in self.lbs:
+ initial_state = self._get_instance_health(lb)
+
+ if enable_availability_zone:
+                self._enable_availability_zone(lb)
+
+ lb.register_instances([self.instance_id])
+
+ if wait:
+ self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
+ else:
+ # We cannot assume no change was made if we don't wait
+ # to find out
+ self.changed = True
+
+ def exists(self, lbtest):
+ """ Verify that the named ELB actually exists """
+
+ found = False
+ for lb in self.lbs:
+ if lb.name == lbtest:
+ found = True
+ break
+ return found
+
+    def _enable_availability_zone(self, lb):
+ """Enable the current instance's availability zone in the provided lb.
+ Returns True if the zone was enabled or False if no change was made.
+ lb: load balancer"""
+ instance = self._get_instance()
+ if instance.placement in lb.availability_zones:
+ return False
+
+ lb.enable_zones(zones=instance.placement)
+
+ # If successful, the new zone will have been added to
+ # lb.availability_zones
+ return instance.placement in lb.availability_zones
+
+ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
+ """Wait for an ELB to change state
+ lb: load balancer
+ awaited_state : state to poll for (string)"""
+
+ wait_timeout = time.time() + timeout
+ while True:
+ instance_state = self._get_instance_health(lb)
+
+ if not instance_state:
+ msg = ("The instance %s could not be put in service on %s."
+ " Reason: Invalid Instance")
+ self.module.fail_json(msg=msg % (self.instance_id, lb))
+
+ if instance_state.state == awaited_state:
+ # Check the current state against the initial state, and only set
+ # changed if they are different.
+ if (initial_state is None) or (instance_state.state != initial_state.state):
+ self.changed = True
+ break
+ elif self._is_instance_state_pending(instance_state):
+ # If it's pending, we'll skip further checks and continue waiting
+ pass
+ elif (awaited_state == 'InService'
+ and instance_state.reason_code == "Instance"
+ and time.time() >= wait_timeout):
+ # If the reason_code for the instance being out of service is
+ # "Instance" this indicates a failure state, e.g. the instance
+ # has failed a health check or the ELB does not have the
+ # instance's availability zone enabled. The exact reason why is
+                # described in InstanceState.description.
+ msg = ("The instance %s could not be put in service on %s."
+ " Reason: %s")
+ self.module.fail_json(msg=msg % (self.instance_id,
+ lb,
+ instance_state.description))
+ time.sleep(1)
+
+ def _is_instance_state_pending(self, instance_state):
+ """
+ Determines whether the instance_state is "pending", meaning there is
+ an operation under way to bring it in service.
+ """
+ # This is messy, because AWS provides no way to distinguish between
+        # an instance that is OutOfService because it's pending vs. OutOfService
+ # because it's failing health checks. So we're forced to analyze the
+ # description, which is likely to be brittle.
+ return (instance_state and 'pending' in instance_state.description)
+
+ def _get_instance_health(self, lb):
+ """
+ Check instance health, should return status object or None under
+ certain error conditions.
+ """
+ try:
+ status = lb.get_instance_health([self.instance_id])[0]
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstance':
+ return None
+ else:
+ raise
+ return status
+
+ def _get_instance_lbs(self, ec2_elbs=None):
+ """Returns a list of ELBs attached to self.instance_id
+ ec2_elbs: an optional list of elb names that will be used
+ for elb lookup instead of returning what elbs
+ are attached to self.instance_id"""
+
+ if not ec2_elbs:
+ ec2_elbs = self._get_auto_scaling_group_lbs()
+
+ try:
+ elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+
+ elbs = []
+ marker = None
+ while True:
+ try:
+ newelbs = elb.get_all_load_balancers(marker=marker)
+ marker = newelbs.next_marker
+ elbs.extend(newelbs)
+ if not marker:
+ break
+ except TypeError:
+ # Older version of boto do not allow for params
+ elbs = elb.get_all_load_balancers()
+ break
+
+ if ec2_elbs:
+ lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
+ else:
+ lbs = []
+ for lb in elbs:
+ for info in lb.instances:
+ if self.instance_id == info.id:
+ lbs.append(lb)
+ return lbs
+
+ def _get_auto_scaling_group_lbs(self):
+ """Returns a list of ELBs associated with self.instance_id
+ indirectly through its auto scaling group membership"""
+
+ try:
+ asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+
+ asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
+ if len(asg_instances) > 1:
+ self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+ if not asg_instances:
+ asg_elbs = []
+ else:
+ asg_name = asg_instances[0].group_name
+
+ asgs = asg.get_all_groups([asg_name])
+            if len(asgs) != 1:
+ self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+ asg_elbs = asgs[0].load_balancers
+
+ return asg_elbs
+
+ def _get_instance(self):
+ """Returns a boto.ec2.InstanceObject for self.instance_id"""
+ try:
+ ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json(msg=str(e))
+ return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
+
+
+def main():
+ argument_spec = dict(
+ state={'required': True, 'choices': ['present', 'absent']},
+ instance_id={'required': True},
+ ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+ wait={'required': False, 'default': True, 'type': 'bool'},
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'},
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ ec2_elbs = module.params['ec2_elbs']
+ wait = module.params['wait']
+ enable_availability_zone = module.params['enable_availability_zone']
+ timeout = module.params['wait_timeout']
+
+    if module.params['state'] == 'present' and ec2_elbs is None:
+ module.fail_json(msg="ELBs are required for registration")
+
+ instance_id = module.params['instance_id']
+ elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
+
+ if ec2_elbs is not None:
+ for elb in ec2_elbs:
+ if not elb_man.exists(elb):
+ msg = "ELB %s does not exist" % elb
+ module.fail_json(msg=msg)
+
+ if not module.check_mode:
+ if module.params['state'] == 'present':
+ elb_man.register(wait, enable_availability_zone, timeout)
+ elif module.params['state'] == 'absent':
+ elb_man.deregister(wait, timeout)
+
+ ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
+ ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
+
+ module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_network_lb.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
new file mode 100644
index 00000000..5e34c527
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_network_lb
+version_added: 1.0.0
+short_description: Manage a Network Load Balancer
+description:
+ - Manage an AWS Network Elastic Load Balancer. See
+ U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ cross_zone_load_balancing:
+ description:
+ - Indicates whether cross-zone load balancing is enabled.
+ - Defaults to C(false).
+ type: bool
+ deletion_protection:
+ description:
+ - Indicates whether deletion protection for the ELB is enabled.
+ - Defaults to C(false).
+ type: bool
+ listeners:
+ description:
+ - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys
+ are CamelCased.
+ type: list
+ elements: dict
+ suboptions:
+ Port:
+ description: The port on which the load balancer is listening.
+ type: int
+ required: true
+ Protocol:
+ description: The protocol for connections from clients to the load balancer.
+ type: str
+ required: true
+ Certificates:
+ description: The SSL server certificate.
+ type: list
+ elements: dict
+ suboptions:
+ CertificateArn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ type: str
+ SslPolicy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ type: str
+ DefaultActions:
+ description: The default actions for the listener.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ Type:
+ description: The type of action.
+ type: str
+ TargetGroupArn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ type: str
+ name:
+ description:
+ - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
+ characters or hyphens, and must not begin or end with a hyphen.
+ required: true
+ type: str
+ purge_listeners:
+ description:
+ - If I(purge_listeners=true), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter.
+ - If the I(listeners) parameter is not set then listeners will not be modified.
+ default: true
+ type: bool
+ purge_tags:
+ description:
+ - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
+ - If the I(tags) parameter is not set then tags will not be modified.
+ default: true
+ type: bool
+ subnet_mappings:
+ description:
+ - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP
+ to attach to the load balancer. You can specify one Elastic IP address per subnet.
+ - This parameter is mutually exclusive with I(subnets).
+ type: list
+ elements: dict
+ subnets:
+ description:
+ - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
+ at least two Availability Zones.
+ - Required when I(state=present).
+ - This parameter is mutually exclusive with I(subnet_mappings).
+ type: list
+ elements: str
+ scheme:
+ description:
+ - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation.
+ default: internet-facing
+ choices: [ 'internet-facing', 'internal' ]
+ type: str
+ state:
+ description:
+ - Create or destroy the load balancer.
+ - The current default is C(absent). However, this behavior is inconsistent with other modules
+ and as such the default will change to C(present) in 2.14.
+ To maintain the existing behavior explicitly set I(state=absent).
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - A dictionary of one or more tags to assign to the load balancer.
+ type: dict
+ wait:
+ description:
+ - Whether or not to wait for the network load balancer to reach the desired state.
+ type: bool
+ wait_timeout:
+ description:
+ - The duration in seconds to wait, used in conjunction with I(wait).
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
+ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create an ELB and attach a listener
+ community.aws.elb_network_lb:
+ name: myelb
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ DefaultActions:
+ - Type: forward # Required. Only 'forward' is accepted at this time
+ TargetGroupName: mytargetgroup # Required. The name of the target group
+ state: present
+
+- name: Create an ELB with an attached Elastic IP address
+ community.aws.elb_network_lb:
+ name: myelb
+ subnet_mappings:
+ - SubnetId: subnet-012345678
+ AllocationId: eipalloc-aabbccdd
+ listeners:
+ - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ DefaultActions:
+ - Type: forward # Required. Only 'forward' is accepted at this time
+ TargetGroupName: mytargetgroup # Required. The name of the target group
+ state: present
+
+- name: Remove an ELB
+ community.aws.elb_network_lb:
+ name: myelb
+ state: absent
+
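+# A TLS listener sketch; the certificate ARN and SSL policy name below are
+# placeholders, not values from this collection.
+- name: Create an ELB with a TLS listener
+  community.aws.elb_network_lb:
+    name: myelb
+    subnets:
+      - subnet-012345678
+      - subnet-abcdef000
+    listeners:
+      - Protocol: TLS
+        Port: 443
+        SslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01
+        Certificates:
+          - CertificateArn: arn:aws:acm:us-east-1:123456789012:certificate/01234567-89ab-cdef-0123-456789abcdef
+        DefaultActions:
+          - Type: forward
+            TargetGroupName: mytargetgroup
+    state: present
+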
+'''
+
+RETURN = r'''
+availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when state is present
+ type: list
+ sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]"
+canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when state is present
+ type: str
+ sample: ABCDEF12345678
+created_time:
+ description: The date and time the load balancer was created.
+ returned: when state is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when state is present
+ type: str
+ sample: 60
+ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when state is present
+ type: str
+ sample: ipv4
+listeners:
+ description: Information about the listeners.
+ returned: when state is present
+ type: complex
+ contains:
+ listener_arn:
+ description: The Amazon Resource Name (ARN) of the listener.
+ returned: when state is present
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: ""
+ port:
+ description: The port on which the load balancer is listening.
+ returned: when state is present
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol for connections from clients to the load balancer.
+ returned: when state is present
+ type: str
+ sample: HTTPS
+ certificates:
+ description: The SSL server certificate.
+ returned: when state is present
+ type: complex
+ contains:
+ certificate_arn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ returned: when state is present
+ type: str
+ sample: ""
+ ssl_policy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ returned: when state is present
+ type: str
+ sample: ""
+ default_actions:
+ description: The default actions for the listener.
+ returned: when state is present
+      type: complex
+ contains:
+ type:
+ description: The type of action.
+ returned: when state is present
+ type: str
+ sample: ""
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state is present
+ type: str
+ sample: ""
+load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+load_balancer_name:
+ description: The name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: my-elb
+load_balancing_cross_zone_enabled:
+ description: Indicates whether cross-zone load balancing is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when state is present
+ type: str
+ sample: internal
+state:
+ description: The state of the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{'code': 'active'}"
+tags:
+ description: The tags attached to the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+type:
+ description: The type of load balancer.
+ returned: when state is present
+ type: str
+ sample: network
+vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when state is present
+ type: str
+ sample: vpc-0011223344
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener
+
+
+def create_or_update_elb(elb_obj):
+ """Create ELB or modify main attributes. json_exit here"""
+
+ if elb_obj.elb:
+ # ELB exists so check subnets, security groups and tags match what has been passed
+
+ # Subnets
+ if not elb_obj.compare_subnets():
+ elb_obj.modify_subnets()
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if elb_obj.tags is not None:
+
+ # Delete necessary tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
+ boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
+ if tags_to_delete:
+ elb_obj.delete_tags(tags_to_delete)
+
+ # Add/update tags
+ if tags_need_modify:
+ elb_obj.modify_tags()
+
+ else:
+ # Create load balancer
+ elb_obj.create_elb()
+
+ # ELB attributes
+ elb_obj.update_elb_attributes()
+ elb_obj.modify_elb_attributes()
+
+ # Listeners
+ listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
+
+ listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
+
+ # Delete listeners
+ for listener_to_delete in listeners_to_delete:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.delete()
+ listeners_obj.changed = True
+
+ # Add listeners
+ for listener_to_add in listeners_to_add:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.add()
+ listeners_obj.changed = True
+
+ # Modify listeners
+ for listener_to_modify in listeners_to_modify:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.modify()
+ listeners_obj.changed = True
+
+ # If listeners changed, mark ELB as changed
+ if listeners_obj.changed:
+ elb_obj.changed = True
+
+ # Get the ELB again
+ elb_obj.update()
+
+ # Get the ELB listeners again
+ listeners_obj.update()
+
+ # Update the ELB attributes
+ elb_obj.update_elb_attributes()
+
+ # Convert to snake_case and merge in everything we want to return to the user
+ snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
+ snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
+ snaked_elb['listeners'] = []
+ for listener in listeners_obj.current_listeners:
+ snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
+
+ # Change tags to ansible friendly dict
+ snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
+
+ elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb)
+
+
+def delete_elb(elb_obj):
+
+ if elb_obj.elb:
+ elb_obj.delete()
+
+ elb_obj.module.exit_json(changed=elb_obj.changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ cross_zone_load_balancing=dict(type='bool'),
+ deletion_protection=dict(type='bool'),
+ listeners=dict(type='list',
+ elements='dict',
+ options=dict(
+ Protocol=dict(type='str', required=True),
+ Port=dict(type='int', required=True),
+ SslPolicy=dict(type='str'),
+ Certificates=dict(type='list', elements='dict'),
+ DefaultActions=dict(type='list', required=True, elements='dict')
+ )
+ ),
+ name=dict(required=True, type='str'),
+ purge_listeners=dict(default=True, type='bool'),
+ purge_tags=dict(default=True, type='bool'),
+ subnets=dict(type='list', elements='str'),
+ subnet_mappings=dict(type='list', elements='dict'),
+ scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
+ state=dict(choices=['present', 'absent'], type='str'),
+ tags=dict(type='dict'),
+ wait_timeout=dict(type='int'),
+ wait=dict(type='bool')
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['subnets', 'subnet_mappings']])
+
+ # Check for subnets or subnet_mappings if state is present
+ state = module.params.get("state")
+ if state == 'present':
+ if module.params.get("subnets") is None and module.params.get("subnet_mappings") is None:
+ module.fail_json(msg="'subnets' or 'subnet_mappings' is required when state=present")
+
+ if state is None:
+ # See below, unless state==present we delete. Ouch.
+ module.deprecate('State currently defaults to absent. This is inconsistent with other modules'
+ ' and the default will be changed to `present` in Ansible 2.14',
+ date='2022-06-01', collection_name='community.aws')
+
+ # Quick check of listeners parameters
+ listeners = module.params.get("listeners")
+ if listeners is not None:
+ for listener in listeners:
+ for key in listener.keys():
+ protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
+ if key == 'Protocol' and listener[key] not in protocols_list:
+ module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list))
+
+ connection = module.client('elbv2')
+ connection_ec2 = module.client('ec2')
+
+ elb = NetworkLoadBalancer(connection, connection_ec2, module)
+
+ if state == 'present':
+ create_or_update_elb(elb)
+ else:
+ delete_elb(elb)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target.py
new file mode 100644
index 00000000..b8cda233
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_target
+version_added: 1.0.0
+short_description: Manage a target in a target group
+description:
+ - Used to register or deregister a target in a target group.
+author: "Rob White (@wimnat)"
+options:
+ deregister_unused:
+ description:
+ - The default behaviour for targets that are unused is to leave them registered.
+      - If instead you would like to remove them, set I(deregister_unused=true).
+ default: false
+ type: bool
+ target_az:
+ description:
+ - An Availability Zone or C(all). This determines whether the target receives traffic from the load balancer nodes in the specified
+ Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target
+ type of the target group is instance.
+ type: str
+ target_group_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the target group.
+      - Mutually exclusive with I(target_group_name).
+ type: str
+ target_group_name:
+ description:
+ - The name of the target group.
+      - Mutually exclusive with I(target_group_arn).
+ type: str
+ target_id:
+ description:
+ - The ID of the target.
+ required: true
+ type: str
+ target_port:
+ description:
+ - The port on which the target is listening. You can specify a port override. If a target is already registered,
+ you can register it again using a different port.
+ - The default port for a target is the port for the target group.
+ required: false
+ type: int
+ target_status:
+ description:
+ - Blocks and waits for the target status to equal given value. For more detail on target status see
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states)
+ required: false
+ choices: [ 'initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable' ]
+ type: str
+ target_status_timeout:
+ description:
+ - Maximum time in seconds to wait for I(target_status) change.
+ required: false
+ default: 60
+ type: int
+ state:
+ description:
+ - Register or deregister the target.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Register an IP address target to a target group
+  community.aws.elb_target:
+    target_group_name: myiptargetgroup
+    target_id: 10.0.0.10
+    state: present
+
+- name: Register an instance target to a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ state: present
+
+- name: Deregister a target from a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ state: absent
+
+# Modify a target to use a different port
+- name: Register a target to a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ target_port: 8080
+ state: present
+
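+# Block until a registered target passes health checks (a usage sketch;
+# the task fails if the state is not reached within target_status_timeout seconds)
+- name: Register a target and wait for it to become healthy
+  community.aws.elb_target:
+    target_group_name: mytargetgroup
+    target_id: i-1234567
+    target_status: healthy
+    target_status_timeout: 300
+    state: present
+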
+'''
+
+RETURN = '''
+
+'''
+
+import traceback
+from time import time, sleep
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+def describe_target_groups_with_backoff(connection, tg_name):
+ return connection.describe_target_groups(Names=[tg_name])
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+
+ try:
+ response = describe_target_groups_with_backoff(connection, tg_name)
+ except ClientError as e:
+ module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except BotoCoreError as e:
+ module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+ return tg_arn
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+def describe_targets_with_backoff(connection, tg_arn, target):
+ if target is None:
+ tg = []
+ else:
+ tg = [target]
+
+ return connection.describe_target_health(TargetGroupArn=tg_arn, Targets=tg)
+
+
+def describe_targets(connection, module, tg_arn, target=None):
+
+ """
+ Describe targets in a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :param tg_arn: target group arn
+ :param target: dictionary containing target id and port
+ :return:
+ """
+
+ try:
+ targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions']
+ if not targets:
+ return {}
+ return targets[0]
+ except ClientError as e:
+ module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except BotoCoreError as e:
+ module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc())
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def register_target_with_backoff(connection, target_group_arn, target):
+ connection.register_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def register_target(connection, module):
+
+ """
+ Registers a target to a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :return:
+ """
+
+ target_az = module.params.get("target_az")
+ target_group_arn = module.params.get("target_group_arn")
+ target_id = module.params.get("target_id")
+ target_port = module.params.get("target_port")
+ target_status = module.params.get("target_status")
+ target_status_timeout = module.params.get("target_status_timeout")
+ changed = False
+
+ if not target_group_arn:
+ target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
+
+ target = dict(Id=target_id)
+ if target_az:
+ target['AvailabilityZone'] = target_az
+ if target_port:
+ target['Port'] = target_port
+
+ target_description = describe_targets(connection, module, target_group_arn, target)
+
+ if 'Reason' in target_description['TargetHealth']:
+ if target_description['TargetHealth']['Reason'] == "Target.NotRegistered":
+ try:
+ register_target_with_backoff(connection, target_group_arn, target)
+ changed = True
+ if target_status:
+ target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+ except ClientError as e:
+ module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except BotoCoreError as e:
+ module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc())
+
+ # Get all targets for the target group
+ target_descriptions = describe_targets(connection, module, target_group_arn)
+
+ module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def deregister_target_with_backoff(connection, target_group_arn, target):
+ connection.deregister_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def deregister_target(connection, module):
+
+ """
+ Deregisters a target to a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :return:
+ """
+
+ deregister_unused = module.params.get("deregister_unused")
+ target_group_arn = module.params.get("target_group_arn")
+ target_id = module.params.get("target_id")
+ target_port = module.params.get("target_port")
+ target_status = module.params.get("target_status")
+ target_status_timeout = module.params.get("target_status_timeout")
+ changed = False
+
+ if not target_group_arn:
+ target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
+
+ target = dict(Id=target_id)
+ if target_port:
+ target['Port'] = target_port
+
+ target_description = describe_targets(connection, module, target_group_arn, target)
+ current_target_state = target_description['TargetHealth']['State']
+ current_target_reason = target_description['TargetHealth'].get('Reason')
+
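+    # Decide whether a deregistration call is needed: an 'unused' target is
+    # only removed when deregister_unused is set, a 'draining' target is left
+    # alone, and any other registered target is deregistered.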
+ needs_deregister = False
+
+ if deregister_unused and current_target_state == 'unused':
+ if current_target_reason != 'Target.NotRegistered':
+ needs_deregister = True
+ elif current_target_state not in ['unused', 'draining']:
+ needs_deregister = True
+
+ if needs_deregister:
+ try:
+ deregister_target_with_backoff(connection, target_group_arn, target)
+ changed = True
+ except ClientError as e:
+ module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except BotoCoreError as e:
+ module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
+ exception=traceback.format_exc())
+ else:
+ if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining':
+ module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " +
+ "To force deregistration use the 'deregister_unused' option.")
+
+ if target_status:
+ target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+
+ # Get all targets for the target group
+ target_descriptions = describe_targets(connection, module, target_group_arn)
+
+ module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+
+
+def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout):
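+    # Poll the target's health once per second until it reaches target_status
+    # or target_status_timeout seconds have elapsed.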
+ reached_state = False
+ timeout = target_status_timeout + time()
+ while time() < timeout:
+ health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State']
+ if health_state == target_status:
+ reached_state = True
+ break
+ sleep(1)
+ if not reached_state:
+        module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}'.format(target_status_timeout, health_state))
+
+
+def main():
+
+ argument_spec = dict(
+ deregister_unused=dict(type='bool', default=False),
+ target_az=dict(type='str'),
+ target_group_arn=dict(type='str'),
+ target_group_name=dict(type='str'),
+ target_id=dict(type='str', required=True),
+ target_port=dict(type='int'),
+ target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'),
+ target_status_timeout=dict(type='int', default=60),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['target_group_arn', 'target_group_name']],
+ )
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ register_target(connection, module)
+ else:
+ deregister_target(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_facts.py
new file mode 100644
index 00000000..92463233
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_facts.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_target_info
+version_added: 1.0.0
+short_description: Gather which target groups a target is associated with
+description:
+ - This module will search through every target group in a region to find
+ which ones have registered a given instance ID or IP.
+ - This module was called C(elb_target_facts) before Ansible 2.9. The usage did not change.
+
+author: "Yaakov Kuperman (@yaakov-github)"
+options:
+ instance_id:
+ description:
+ - What instance ID to get information for.
+ type: str
+ required: true
+ get_unused_target_groups:
+ description:
+ - Whether or not to get target groups not used by any load balancers.
+ type: bool
+ default: true
+
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = """
+# practical use case - dynamically de-registering and re-registering nodes
+
+ - name: Get EC2 Metadata
+ amazon.aws.ec2_metadata_facts:
+
+ - name: Get initial list of target groups
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+
+ - name: save fact for later
+ ansible.builtin.set_fact:
+ original_tgs: "{{ target_info.instance_target_groups }}"
+
+ - name: Deregister instance from all target groups
+ delegate_to: localhost
+ community.aws.elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ region: "{{ ansible_ec2_placement_region }}"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # This avoids having to wait for 'elb_target' to serially deregister each
+ # target group. An alternative would be to run all of the 'elb_target'
+ # tasks async and wait for them to finish.
+
+ - name: wait for all targets to deregister simultaneously
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+ - name: reregister in elbv2s
+ community.aws.elb_target:
+ region: "{{ ansible_ec2_placement_region }}"
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # wait until all groups associated with this instance are 'healthy' or
+ # 'unused'
+ - name: wait for registration
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
+
+# using the target groups to generate AWS CLI commands to reregister the
+# instance - useful in case the playbook fails mid-run and manual
+# rollback is required
+ - name: "reregistration commands: ELBv2s"
+ ansible.builtin.debug:
+ msg: >
+ aws --region {{ansible_ec2_placement_region}} elbv2
+ register-targets --target-group-arn {{item.target_group_arn}}
+ --targets{%for target in item.targets%}
+ Id={{target.target_id}},
+ Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
+ {%endif%}
+ {%endfor%}
+ loop: "{{target_info.instance_target_groups}}"
+
+"""
+
+RETURN = """
+instance_target_groups:
+ description: A list of target groups to which the instance is registered
+ returned: always
+ type: complex
+ contains:
+ target_group_arn:
+ description: The ARN of the target group
+ type: str
+ returned: always
+ sample:
+ - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef"
+ target_group_type:
+ description: Which target type is used for this group
+ returned: always
+ type: str
+ sample:
+ - ip
+ - instance
+ targets:
+ description: A list of targets that point to this instance ID
+ returned: always
+ type: complex
+ contains:
+ target_id:
+ description: the target ID referring to this instance
+ type: str
+ returned: always
+ sample:
+ - i-deadbeef
+ - 1.2.3.4
+ target_port:
+ description: which port this target is listening on
+ type: str
+ returned: always
+ sample:
+ - 80
+ target_az:
+ description: which availability zone is explicitly
+ associated with this target
+ type: str
+ returned: when an AZ is associated with this instance
+ sample:
+ - us-west-2a
+ target_health:
+ description:
+ - The target health description.
+ - See following link for all the possible values
+ U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health)
+ returned: always
+ type: complex
+ contains:
+ description:
+ description: description of target health
+ returned: if I(state!=present)
+ sample:
+ - "Target desregistration is in progress"
+ type: str
+ reason:
+ description: reason code for target health
+ returned: if I(state!=healthy)
+ sample:
+ - "Target.Deregistration in progress"
+ type: str
+ state:
+ description: health state
+ returned: always
+ sample:
+ - "healthy"
+ - "draining"
+ - "initial"
+ - "unhealthy"
+ - "unused"
+ - "unavailable"
+ type: str
+"""
+
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # we can handle the lack of boto3 based on the ec2 module
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+class Target(object):
+ """Models a target in a target group"""
+ def __init__(self, target_id, port, az, raw_target_health):
+ self.target_port = port
+ self.target_id = target_id
+ self.target_az = az
+ self.target_health = self.convert_target_health(raw_target_health)
+
+ def convert_target_health(self, raw_target_health):
+ return camel_dict_to_snake_dict(raw_target_health)
+
+
+class TargetGroup(object):
+ """Models an elbv2 target group"""
+
+ def __init__(self, **kwargs):
+ self.target_group_type = kwargs["target_group_type"]
+ self.target_group_arn = kwargs["target_group_arn"]
+ # the relevant targets associated with this group
+ self.targets = []
+
+ def add_target(self, target_id, target_port, target_az, raw_target_health):
+ self.targets.append(Target(target_id,
+ target_port,
+ target_az,
+ raw_target_health))
+
+ def to_dict(self):
+ # copy the instance dict so that vars(self) is not mutated in place
+ object_dict = dict(vars(self))
+ object_dict["targets"] = [vars(each) for each in self.get_targets()]
+ return object_dict
+
+ def get_targets(self):
+ return list(self.targets)
+
+
+class TargetInfoGatherer(object):
+
+ def __init__(self, module, instance_id, get_unused_target_groups):
+ self.module = module
+ try:
+ self.ec2 = self.module.client(
+ "ec2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e,
+ msg="Couldn't connect to ec2"
+ )
+
+ try:
+ self.elbv2 = self.module.client(
+ "elbv2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not connect to elbv2"
+ )
+
+ self.instance_id = instance_id
+ self.get_unused_target_groups = get_unused_target_groups
+ self.tgs = self._get_target_groups()
+
+ def _get_instance_ips(self):
+ """Fetch all IPs associated with this instance so that we can determine
+ whether or not an instance is in an IP-based target group"""
+ try:
+ # get ahold of the instance in the API
+ reservations = self.ec2.describe_instances(
+ InstanceIds=[self.instance_id],
+ aws_retry=True
+ )["Reservations"]
+ except (BotoCoreError, ClientError) as e:
+ # typically this will happen if the instance doesn't exist
+ self.module.fail_json_aws(e,
+ msg="Could not get instance info" +
+ " for instance '%s'" %
+ (self.instance_id)
+ )
+
+ if len(reservations) < 1:
+ self.module.fail_json(
+ msg="Instance ID %s could not be found" % self.instance_id
+ )
+
+ instance = reservations[0]["Instances"][0]
+
+ # IPs are represented in a few places in the API, this should
+ # account for all of them
+ ips = set()
+ ips.add(instance["PrivateIpAddress"])
+ for nic in instance["NetworkInterfaces"]:
+ ips.add(nic["PrivateIpAddress"])
+ for ip in nic["PrivateIpAddresses"]:
+ ips.add(ip["PrivateIpAddress"])
+
+ return list(ips)
+
+ def _get_target_group_objects(self):
+ """helper function to build a list of TargetGroup objects based on
+ the AWS API"""
+ try:
+ paginator = self.elbv2.get_paginator(
+ "describe_target_groups"
+ )
+ tg_response = paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target" +
+ " groups"
+ )
+
+ # build list of TargetGroup objects representing every target group in
+ # the system
+ target_groups = []
+ for each_tg in tg_response["TargetGroups"]:
+ if not self.get_unused_target_groups and \
+ len(each_tg["LoadBalancerArns"]) < 1:
+ # only collect target groups that actually are connected
+ # to LBs
+ continue
+
+ target_groups.append(
+ TargetGroup(target_group_arn=each_tg["TargetGroupArn"],
+ target_group_type=each_tg["TargetType"],
+ )
+ )
+ return target_groups
+
+ def _get_target_descriptions(self, target_groups):
+ """Helper function to build a list of all the target descriptions
+ for this target in a target group"""
+ # Build a list of all the target groups pointing to this instance
+ # based on the previous list
+ tgs = set()
+ # Loop through all the target groups
+ for tg in target_groups:
+ try:
+ # Get the list of targets for that target group
+ response = self.elbv2.describe_target_health(
+ TargetGroupArn=tg.target_group_arn,
+ aws_retry=True
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target " +
+ "health for target group %s" %
+ tg.target_group_arn
+ )
+
+ for t in response["TargetHealthDescriptions"]:
+ # If the target group has this instance as a target, add to
+ # list. This logic also accounts for the possibility of a
+ # target being in the target group multiple times with
+ # overridden ports
+ if t["Target"]["Id"] == self.instance_id or \
+ t["Target"]["Id"] in self.instance_ips:
+
+ # The 'AvailabilityZone' parameter is a weird one, see the
+ # API docs for more. Basically it's only supposed to be
+ # there under very specific circumstances, so we need
+ # to account for that
+ az = t["Target"]["AvailabilityZone"] \
+ if "AvailabilityZone" in t["Target"] \
+ else None
+
+ tg.add_target(t["Target"]["Id"],
+ t["Target"]["Port"],
+ az,
+ t["TargetHealth"])
+ # since tgs is a set, each target group will be added only
+ # once, even though we call add on each successful match
+ tgs.add(tg)
+ return list(tgs)
+
+ def _get_target_groups(self):
+ # do this first since we need the IPs later on in this function
+ self.instance_ips = self._get_instance_ips()
+
+ # build list of target groups
+ target_groups = self._get_target_group_objects()
+ return self._get_target_descriptions(target_groups)
+
+
+def main():
+ argument_spec = dict(
+ instance_id={"required": True, "type": "str"},
+ get_unused_target_groups={"required": False,
+ "default": True, "type": "bool"}
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_target_facts':
+ module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", date='2021-12-01', collection_name='community.aws')
+
+ instance_id = module.params["instance_id"]
+ get_unused_target_groups = module.params["get_unused_target_groups"]
+
+ tg_gatherer = TargetInfoGatherer(module,
+ instance_id,
+ get_unused_target_groups
+ )
+
+ instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs]
+
+ module.exit_json(instance_target_groups=instance_target_groups)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group.py
new file mode 100644
index 00000000..e6c94f06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group.py
@@ -0,0 +1,865 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_target_group
+version_added: 1.0.0
+short_description: Manage a target group for an Application or Network load balancer
+description:
+ - Manage an AWS Elastic Load Balancer target group. See
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ deregistration_delay_timeout:
+ description:
+ - The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ The range is 0-3600 seconds.
+ type: int
+ health_check_protocol:
+ description:
+ - The protocol the load balancer uses when performing health checks on targets.
+ required: false
+ choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ type: str
+ health_check_port:
+ description:
+ - The port the load balancer uses when performing health checks on targets.
+ Can be set to 'traffic-port' to match target port.
+ - When not defined will default to the port on which each target receives traffic from the load balancer.
+ required: false
+ type: str
+ health_check_path:
+ description:
+ - The ping path that is the destination on the targets for health checks. The path must be defined in order to set a health check.
+ - Requires the I(health_check_protocol) parameter to be set.
+ required: false
+ type: str
+ health_check_interval:
+ description:
+ - The approximate amount of time, in seconds, between health checks of an individual target.
+ required: false
+ type: int
+ health_check_timeout:
+ description:
+ - The amount of time, in seconds, during which no response from a target means a failed health check.
+ required: false
+ type: int
+ healthy_threshold_count:
+ description:
+ - The number of consecutive health checks successes required before considering an unhealthy target healthy.
+ required: false
+ type: int
+ modify_targets:
+ description:
+ - Whether or not to alter existing targets in the group to match what is passed with the module.
+ required: false
+ default: yes
+ type: bool
+ name:
+ description:
+ - The name of the target group.
+ required: true
+ type: str
+ port:
+ description:
+ - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. Required if
+ I(state) is C(present).
+ required: false
+ type: int
+ protocol:
+ description:
+ - The protocol to use for routing traffic to the targets. Required when I(state) is C(present).
+ required: false
+ choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ type: str
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the tag parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ type: bool
+ state:
+ description:
+ - Create or destroy the target group.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ stickiness_enabled:
+ description:
+ - Indicates whether sticky sessions are enabled.
+ type: bool
+ stickiness_lb_cookie_duration:
+ description:
+ - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load
+ balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds).
+ type: int
+ stickiness_type:
+ description:
+ - The type of sticky sessions.
+ - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers.
+ type: str
+ successful_response_codes:
+ description:
+ - The HTTP codes to use when checking for a successful response from a target.
+ - Accepts multiple values (for example, "200,202") or a range of values (for example, "200-299").
+ - Requires the I(health_check_protocol) parameter to be set.
+ required: false
+ type: str
+ tags:
+ description:
+ - A dictionary of one or more tags to assign to the target group.
+ required: false
+ type: dict
+ target_type:
+ description:
+ - The type of target that you must specify when registering targets with this target group. The possible values are
+ C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address) or C(lambda) (target is specified by ARN).
+ - Note that you can't specify targets for a target group using more than one type. Target type lambda only accepts one target. When more than
+ one target is specified, only the first one is used. All additional targets are ignored.
+ - If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target
+ group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10).
+ You can't specify publicly routable IP addresses.
+ - The default behavior is C(instance).
+ required: false
+ choices: ['instance', 'ip', 'lambda']
+ type: str
+ targets:
+ description:
+ - A list of targets to assign to the target group. This parameter defaults to an empty list. Unless I(modify_targets) is set to C(no), any
+ existing targets not in this list will be removed from the group. Each target entry should have an C(Id) and a C(Port) parameter. See the examples for detail.
+ required: false
+ type: list
+ elements: dict
+ unhealthy_threshold_count:
+ description:
+ - The number of consecutive health check failures required before considering a target unhealthy.
+ required: false
+ type: int
+ vpc_id:
+ description:
+ - The identifier of the virtual private cloud (VPC). Required when I(state) is C(present).
+ required: false
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the targets to reach the expected state (C(healthy) after registering, C(unused) after deregistering).
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - The maximum time, in seconds, to wait for the targets to reach the expected state.
+ default: 200
+ type: int
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - Once a target group has been created, only its health check can be modified by subsequent calls.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a target group with a default health check
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 80
+ vpc_id: vpc-01234567
+ state: present
+
+- name: Modify the target group with a custom health check
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 80
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /health_check
+ health_check_port: 80
+ successful_response_codes: 200
+ health_check_interval: 15
+ health_check_timeout: 3
+ healthy_threshold_count: 4
+ unhealthy_threshold_count: 3
+ state: present
+
+- name: Delete a target group
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ state: absent
+
+- name: Create a target group with instance targets
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 81
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /
+ successful_response_codes: "200,250-260"
+ targets:
+ - Id: i-01234567
+ Port: 80
+ - Id: i-98765432
+ Port: 80
+ state: present
+ wait_timeout: 200
+ wait: True
+
+- name: Create a target group with IP address targets
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 81
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /
+ successful_response_codes: "200,250-260"
+ target_type: ip
+ targets:
+ - Id: 10.0.0.10
+ Port: 80
+ AvailabilityZone: all
+ - Id: 10.0.0.20
+ Port: 80
+ state: present
+ wait_timeout: 200
+ wait: True
+
+# Using a lambda function as a target requires that the target group
+# itself is allowed to invoke the lambda function.
+# Therefore you first need to create an empty target group
+# to obtain its ARN, second, allow the target group
+# to invoke the lambda function, and third, add the target
+# to the target group.
+- name: first, create empty target group
+ community.aws.elb_target_group:
+ name: my-lambda-targetgroup
+ target_type: lambda
+ state: present
+ modify_targets: False
+ register: out
+
+- name: second, allow invoke of the lambda
+ community.aws.lambda_policy:
+ state: "{{ state | default('present') }}"
+ function_name: my-lambda-function
+ statement_id: someID
+ action: lambda:InvokeFunction
+ principal: elasticloadbalancing.amazonaws.com
+ source_arn: "{{ out.target_group_arn }}"
+
+- name: third, add target
+ community.aws.elb_target_group:
+ name: my-lambda-targetgroup
+ target_type: lambda
+ state: present
+ targets:
+ - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
+
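+# A sketch, not part of the original examples: a TCP target group for a
+# Network Load Balancer with source_ip stickiness. The name, port and
+# VPC ID below are placeholders.
+- name: Create a TCP target group with source_ip stickiness
+ community.aws.elb_target_group:
+ name: mytcptargetgroup
+ protocol: tcp
+ port: 25
+ vpc_id: vpc-01234567
+ stickiness_enabled: yes
+ stickiness_type: source_ip
+ state: present
+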
+'''
+
+RETURN = r'''
+deregistration_delay_timeout_seconds:
+ description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ returned: when state present
+ type: int
+ sample: 300
+health_check_interval_seconds:
+ description: The approximate amount of time, in seconds, between health checks of an individual target.
+ returned: when state present
+ type: int
+ sample: 30
+health_check_path:
+ description: The destination for the health check request.
+ returned: when state present
+ type: str
+ sample: /index.html
+health_check_port:
+ description: The port to use to connect with the target.
+ returned: when state present
+ type: str
+ sample: traffic-port
+health_check_protocol:
+ description: The protocol to use to connect with the target.
+ returned: when state present
+ type: str
+ sample: HTTP
+health_check_timeout_seconds:
+ description: The amount of time, in seconds, during which no response means a failed health check.
+ returned: when state present
+ type: int
+ sample: 5
+healthy_threshold_count:
+ description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
+ returned: when state present
+ type: int
+ sample: 5
+load_balancer_arns:
+ description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+ returned: when state present
+ type: list
+ sample: []
+matcher:
+ description: The HTTP codes to use when checking for a successful response from a target.
+ returned: when state present
+ type: dict
+ sample: {
+ "http_code": "200"
+ }
+port:
+ description: The port on which the targets are listening.
+ returned: when state present
+ type: int
+ sample: 80
+protocol:
+ description: The protocol to use for routing traffic to the targets.
+ returned: when state present
+ type: str
+ sample: HTTP
+stickiness_enabled:
+ description: Indicates whether sticky sessions are enabled.
+ returned: when state present
+ type: bool
+ sample: true
+stickiness_lb_cookie_duration_seconds:
+ description: The time period, in seconds, during which requests from a client should be routed to the same target.
+ returned: when state present
+ type: int
+ sample: 86400
+stickiness_type:
+ description: The type of sticky sessions.
+ returned: when state present
+ type: str
+ sample: lb_cookie
+tags:
+ description: The tags attached to the target group.
+ returned: when state present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state present
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
+target_group_name:
+ description: The name of the target group.
+ returned: when state present
+ type: str
+ sample: mytargetgroup
+unhealthy_threshold_count:
+ description: The number of consecutive health check failures required before considering the target unhealthy.
+ returned: when state present
+ type: int
+ sample: 2
+vpc_id:
+ description: The ID of the VPC for the targets.
+ returned: when state present
+ type: str
+ sample: vpc-0123456
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+
+
+def get_tg_attributes(connection, module, tg_arn):
+ try:
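+ # the attributes API returns a list of Key/Value pairs, so the tag helper can convert them to a dict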
+ _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True)
+ tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items())
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+ try:
+ _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True)
+ return _tags['TagDescriptions'][0]['Tags']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group tags")
+
+
+def get_target_group(connection, module, retry_missing=False):
+ extra_codes = ['TargetGroupNotFound'] if retry_missing else []
+ try:
+ target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")])
+ jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes)
+ result = jittered_retry(target_group_paginator.build_full_result)()
+ except is_boto3_error_code('TargetGroupNotFound'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get target group")
+
+ return result['TargetGroups'][0]
+
+
+def wait_for_status(connection, module, target_group_arn, targets, status):
+ polling_increment_secs = 5
+ max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
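+ # note that only the state of the first target in 'targets' is checked below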
+ status_achieved = False
+ # make sure 'result' is defined even if the loop never runs (wait_timeout < 5)
+ response = None
+
+ for x in range(0, max_retries):
+ try:
+ response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True)
+ if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe target health")
+
+ result = response
+ return status_achieved, result
+
+
+def fail_if_ip_target_type_not_supported(module):
+ if not module.botocore_at_least('1.7.2'):
+ module.fail_json(msg="target_type ip requires botocore version 1.7.2 or later. Version %s is installed" %
+ botocore.__version__)
+
+
+def create_or_update_target_group(connection, module):
+
+ changed = False
+ new_target_group = False
+ params = dict()
+ target_type = module.params.get("target_type")
+ params['Name'] = module.params.get("name")
+ params['TargetType'] = target_type
+ if target_type != "lambda":
+ params['Protocol'] = module.params.get("protocol").upper()
+ params['Port'] = module.params.get("port")
+ params['VpcId'] = module.params.get("vpc_id")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ deregistration_delay_timeout = module.params.get("deregistration_delay_timeout")
+ stickiness_enabled = module.params.get("stickiness_enabled")
+ stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration")
+ stickiness_type = module.params.get("stickiness_type")
+
+ health_option_keys = [
+ "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout",
+ "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes"
+ ]
+ health_options = any([module.params[health_option_key] is not None for health_option_key in health_option_keys])
+
+ # Set health check if anything set
+ if health_options:
+
+ if module.params.get("health_check_protocol") is not None:
+ params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper()
+
+ if module.params.get("health_check_port") is not None:
+ params['HealthCheckPort'] = module.params.get("health_check_port")
+
+ if module.params.get("health_check_interval") is not None:
+ params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval")
+
+ if module.params.get("health_check_timeout") is not None:
+ params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout")
+
+ if module.params.get("healthy_threshold_count") is not None:
+ params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count")
+
+ if module.params.get("unhealthy_threshold_count") is not None:
+ params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count")
+
+ # Only need to check response code and path for http(s) health checks
+ protocol = module.params.get("health_check_protocol")
+ if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']:
+
+ if module.params.get("health_check_path") is not None:
+ params['HealthCheckPath'] = module.params.get("health_check_path")
+
+ if module.params.get("successful_response_codes") is not None:
+ params['Matcher'] = {}
+ params['Matcher']['HttpCode'] = module.params.get("successful_response_codes")
+
+ # Get target type
+ if target_type == 'ip':
+ fail_if_ip_target_type_not_supported(module)
+
+ # Get target group
+ tg = get_target_group(connection, module)
+
+ if tg:
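+ # Port, Protocol and VpcId cannot be changed once the target group exists, so a mismatch is fatal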
+ diffs = [param for param in ('Port', 'Protocol', 'VpcId')
+ if tg.get(param) != params.get(param)]
+ if diffs:
+ module.fail_json(msg="Cannot modify %s parameter(s) for a target group" %
+ ", ".join(diffs))
+ # Target group exists so check health check parameters match what has been passed
+ health_check_params = dict()
+
+ # Modify health check if anything set
+ if health_options:
+
+ # Health check protocol
+ if 'HealthCheckProtocol' in params and tg['HealthCheckProtocol'] != params['HealthCheckProtocol']:
+ health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol']
+
+ # Health check port
+ if 'HealthCheckPort' in params and tg['HealthCheckPort'] != params['HealthCheckPort']:
+ health_check_params['HealthCheckPort'] = params['HealthCheckPort']
+
+ # Health check interval
+ if 'HealthCheckIntervalSeconds' in params and tg['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']:
+ health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds']
+
+ # Health check timeout
+ if 'HealthCheckTimeoutSeconds' in params and tg['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']:
+ health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds']
+
+ # Healthy threshold
+ if 'HealthyThresholdCount' in params and tg['HealthyThresholdCount'] != params['HealthyThresholdCount']:
+ health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount']
+
+ # Unhealthy threshold
+ if 'UnhealthyThresholdCount' in params and tg['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']:
+ health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount']
+
+ # Only need to check response code and path for http(s) health checks
+ if tg['HealthCheckProtocol'] in ['HTTP', 'HTTPS']:
+ # Health check path
+ if 'HealthCheckPath' in params and tg['HealthCheckPath'] != params['HealthCheckPath']:
+ health_check_params['HealthCheckPath'] = params['HealthCheckPath']
+
+ # Matcher (successful response codes)
+ # TODO: is this check required, and does it belong here?
+ if 'Matcher' in params:
+ current_matcher_list = tg['Matcher']['HttpCode'].split(',')
+ requested_matcher_list = params['Matcher']['HttpCode'].split(',')
+ if set(current_matcher_list) != set(requested_matcher_list):
+ health_check_params['Matcher'] = {}
+ health_check_params['Matcher']['HttpCode'] = ','.join(requested_matcher_list)
+
+ try:
+ if health_check_params:
+ connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True, **health_check_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update target group")
+
+ # Do we need to modify targets?
+ if module.params.get("modify_targets"):
+ # get the list of current target instances. There is no describe-targets API,
+ # so describe_target_health seems to be the only way to get them
+ try:
+ current_targets = connection.describe_target_health(
+ TargetGroupArn=tg['TargetGroupArn'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group health")
+
+ if module.params.get("targets"):
+
+ if target_type != "lambda":
+ params['Targets'] = module.params.get("targets")
+
+ # Correct type of target ports
+ for target in params['Targets']:
+ target['Port'] = int(target.get('Port', module.params.get('port')))
+
+ current_instance_ids = []
+
+ for instance in current_targets['TargetHealthDescriptions']:
+ current_instance_ids.append(instance['Target']['Id'])
+
+ new_instance_ids = []
+ for instance in params['Targets']:
+ new_instance_ids.append(instance['Id'])
+
+ add_instances = set(new_instance_ids) - set(current_instance_ids)
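+ # the set difference yields targets requested in the task but not yet registered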
+
+ if add_instances:
+ instances_to_add = []
+ for target in params['Targets']:
+ if target['Id'] in add_instances:
+ instances_to_add.append({'Id': target['Id'], 'Port': target['Port']})
+
+ changed = True
+ try:
+ connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't register targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_add, 'healthy')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
+
+ remove_instances = set(current_instance_ids) - set(new_instance_ids)
+
+ if remove_instances:
+ instances_to_remove = []
+ for target in current_targets['TargetHealthDescriptions']:
+ if target['Target']['Id'] in remove_instances:
+ instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+
+ changed = True
+ try:
+ connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for target deregistration - please check the AWS console')
+
+ # register lambda target
+ else:
+ try:
+ target = module.params.get("targets")[0]
+ # track the lambda registration separately so that an earlier 'changed'
+ # result (for example a health check update) is not clobbered here
+ register_needed = False
+ if len(current_targets["TargetHealthDescriptions"]) == 0:
+ register_needed = True
+ else:
+ for item in current_targets["TargetHealthDescriptions"]:
+ if target["Id"] != item["Target"]["Id"]:
+ register_needed = True
+ break # only one target is possible with lambda
+
+ if register_needed:
+ changed = True
+ if target.get("Id"):
+ response = connection.register_targets(
+ TargetGroupArn=tg['TargetGroupArn'],
+ Targets=[
+ {
+ "Id": target['Id']
+ }
+ ],
+ aws_retry=True
+ )
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't register targets")
+ else:
+ if target_type != "lambda":
+
+ current_instances = current_targets['TargetHealthDescriptions']
+
+ if current_instances:
+ instances_to_remove = []
+ for target in current_targets['TargetHealthDescriptions']:
+ instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+
+ changed = True
+ try:
+ connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for target deregistration - please check the AWS console')
+
+ # remove lambda targets
+ else:
+ # don't reset 'changed' here - an earlier update may already have set it
+ if current_targets["TargetHealthDescriptions"]:
+ changed = True
+ # only one target is possible with lambda
+ target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"]
+ connection.deregister_targets(
+ TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True)
+ else:
+ try:
+ connection.create_target_group(aws_retry=True, **params)
+ changed = True
+ new_target_group = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create target group")
+
+ tg = get_target_group(connection, module, retry_missing=True)
+
+ if module.params.get("targets"):
+ if target_type != "lambda":
+ params['Targets'] = module.params.get("targets")
+ try:
+ connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't register targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], params['Targets'], 'healthy')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
+
+ else:
+ try:
+ target = module.params.get("targets")[0]
+ response = connection.register_targets(
+ TargetGroupArn=tg['TargetGroupArn'],
+ Targets=[
+ {
+ "Id": target["Id"]
+ }
+ ],
+ aws_retry=True
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't register targets")
+
+ # Now set target group attributes
+ update_attributes = []
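+ # attribute changes are applied through modify_target_group_attributes as a list of Key/Value pairs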
+
+ # Get current attributes
+ current_tg_attributes = get_tg_attributes(connection, module, tg['TargetGroupArn'])
+
+ if deregistration_delay_timeout is not None:
+ if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']:
+ update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)})
+ if stickiness_enabled is not None:
+ if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true":
+ update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'})
+ if stickiness_lb_cookie_duration is not None:
+ if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']:
+ update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)})
+ if stickiness_type is not None:
+ if stickiness_type != current_tg_attributes.get('stickiness_type'):
+ update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type})
+
+ if update_attributes:
+ try:
+ connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], Attributes=update_attributes, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state
+ if new_target_group:
+ connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True)
+ module.fail_json_aws(e, msg="Couldn't delete target group")
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if tags:
+ # Get tags
+ current_tags = get_target_group_tags(connection, module, tg['TargetGroupArn'])
+
+ # Delete necessary tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags)
+ if tags_to_delete:
+ try:
+ connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags from target group")
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add tags to target group")
+ changed = True
+
+ # Get the target group again
+ tg = get_target_group(connection, module)
+
+ # Get the target group attributes again
+ tg.update(get_tg_attributes(connection, module, tg['TargetGroupArn']))
+
+ # Convert tg to snake_case
+ snaked_tg = camel_dict_to_snake_dict(tg)
+
+ snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, tg['TargetGroupArn']))
+
+ module.exit_json(changed=changed, **snaked_tg)
+
+
+def delete_target_group(connection, module):
+ changed = False
+ tg = get_target_group(connection, module)
+
+ if tg:
+ try:
+ connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete target group")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP',
+ 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ argument_spec = dict(
+ deregistration_delay_timeout=dict(type='int'),
+ health_check_protocol=dict(choices=protocols_list),
+ health_check_port=dict(),
+ health_check_path=dict(),
+ health_check_interval=dict(type='int'),
+ health_check_timeout=dict(type='int'),
+ healthy_threshold_count=dict(type='int'),
+ modify_targets=dict(default=True, type='bool'),
+ name=dict(required=True),
+ port=dict(type='int'),
+ protocol=dict(choices=protocols_list),
+ purge_tags=dict(default=True, type='bool'),
+ stickiness_enabled=dict(type='bool'),
+ stickiness_type=dict(),
+ stickiness_lb_cookie_duration=dict(type='int'),
+ state=dict(required=True, choices=['present', 'absent']),
+ successful_response_codes=dict(),
+ tags=dict(default={}, type='dict'),
+ target_type=dict(choices=['instance', 'ip', 'lambda']),
+ targets=dict(type='list', elements='dict'),
+ unhealthy_threshold_count=dict(type='int'),
+ vpc_id=dict(),
+ wait_timeout=dict(type='int', default=200),
+ wait=dict(type='bool', default=False)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ['target_type', 'instance', ['protocol', 'port', 'vpc_id']],
+ ['target_type', 'ip', ['protocol', 'port', 'vpc_id']],
+ ]
+ )
+
+ if module.params.get('target_type') is None:
+ module.params['target_type'] = 'instance'
+
+ connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ if module.params.get('state') == 'present':
+ create_or_update_target_group(connection, module)
+ else:
+ delete_target_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_facts.py
new file mode 100644
index 00000000..a9694428
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_facts.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_target_group_info
+version_added: 1.0.0
+short_description: Gather information about ELB target groups in AWS
+description:
+ - Gather information about ELB target groups in AWS
+ - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+ load_balancer_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the load balancer.
+ required: false
+ type: str
+ target_group_arns:
+ description:
+ - The Amazon Resource Names (ARN) of the target groups.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the target groups.
+ required: false
+ type: list
+ elements: str
+ collect_targets_health:
+ description:
+ - When set to C(yes), the output contains the health description of the targets.
+ required: false
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all target groups
+ community.aws.elb_target_group_info:
+
+- name: Gather information about the target group attached to a particular ELB
+ community.aws.elb_target_group_info:
+ load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+- name: Gather information about target groups named 'tg1' and 'tg2'
+ community.aws.elb_target_group_info:
+ names:
+ - tg1
+ - tg2
+
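+# A sketch, not part of the original examples: include each target group's
+# health descriptions in the output; the registered variable name is arbitrary
+- name: Gather information about all target groups, including target health
+ community.aws.elb_target_group_info:
+ collect_targets_health: yes
+ register: tg_info
+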
+'''
+
+RETURN = r'''
+target_groups:
+ description: a list of target groups
+ returned: always
+ type: complex
+ contains:
+ deregistration_delay_timeout_seconds:
+ description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ returned: always
+ type: int
+ sample: 300
+ health_check_interval_seconds:
+ description: The approximate amount of time, in seconds, between health checks of an individual target.
+ returned: always
+ type: int
+ sample: 30
+ health_check_path:
+ description: The destination for the health check request.
+ returned: always
+ type: str
+ sample: /index.html
+ health_check_port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: str
+ sample: traffic-port
+ health_check_protocol:
+ description: The protocol to use to connect with the target.
+ returned: always
+ type: str
+ sample: HTTP
+ health_check_timeout_seconds:
+ description: The amount of time, in seconds, during which no response means a failed health check.
+ returned: always
+ type: int
+ sample: 5
+ healthy_threshold_count:
+ description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
+ returned: always
+ type: int
+ sample: 5
+ load_balancer_arns:
+ description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+ returned: always
+ type: list
+ sample: []
+ matcher:
+ description: The HTTP codes to use when checking for a successful response from a target.
+ returned: always
+ type: dict
+ sample: {
+ "http_code": "200"
+ }
+ port:
+ description: The port on which the targets are listening.
+ returned: always
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol to use for routing traffic to the targets.
+ returned: always
+ type: str
+ sample: HTTP
+ stickiness_enabled:
+ description: Indicates whether sticky sessions are enabled.
+ returned: always
+ type: bool
+ sample: true
+ stickiness_lb_cookie_duration_seconds:
+ description: The time period, in seconds, during which requests from a client should be routed to the same target.
+ returned: always
+ type: int
+ sample: 86400
+ stickiness_type:
+ description: The type of sticky sessions.
+ returned: always
+ type: str
+ sample: lb_cookie
+ tags:
+ description: The tags attached to the target group.
+ returned: always
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: always
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
+ targets_health_description:
+ description: Targets health description.
+ returned: when collect_targets_health is enabled
+ type: complex
+ contains:
+ health_check_port:
+ description: The port to check target health.
+ returned: always
+ type: str
+ sample: '80'
+ target:
+ description: The target metadata.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: The ID of the target.
+ returned: always
+ type: str
+ sample: i-0123456789
+ port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: int
+ sample: 80
+ target_health:
+ description: The target health status.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the target health.
+ returned: always
+ type: str
+ sample: healthy
+ target_group_name:
+ description: The name of the target group.
+ returned: always
+ type: str
+ sample: mytargetgroup
+ unhealthy_threshold_count:
+ description: The number of consecutive health check failures required before considering the target unhealthy.
+ returned: always
+ type: int
+ sample: 2
+ vpc_id:
+ description: The ID of the VPC for the targets.
+ returned: always
+ type: str
+ sample: vpc-0123456
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_target_group_attributes(connection, module, target_group_arn):
+
+ try:
+ target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe target group attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ return dict((k.replace('.', '_'), v)
+ for (k, v) in target_group_attributes.items())
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe group tags")
+
+
+def get_target_group_targets_health(connection, module, target_group_arn):
+
+ try:
+ return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to get target health")
+
+
+def list_target_groups(connection, module):
+
+ load_balancer_arn = module.params.get("load_balancer_arn")
+ target_group_arns = module.params.get("target_group_arns")
+ names = module.params.get("names")
+ collect_targets_health = module.params.get("collect_targets_health")
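+ # the three filters are mutually exclusive (enforced in main()), so at most one paginate call below applies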
+
+ try:
+ target_group_paginator = connection.get_paginator('describe_target_groups')
+ if not load_balancer_arn and not target_group_arns and not names:
+ target_groups = target_group_paginator.paginate().build_full_result()
+ if load_balancer_arn:
+ target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
+ if target_group_arns:
+ target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
+ if names:
+ target_groups = target_group_paginator.paginate(Names=names).build_full_result()
+ except is_boto3_error_code('TargetGroupNotFound'):
+ module.exit_json(target_groups=[])
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list target groups")
+ except NoCredentialsError as e:
+ module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc())
+
+ # Get the attributes and tags for each target group
+ for target_group in target_groups['TargetGroups']:
+ target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
+
+ # Get tags for each target group
+ for snaked_target_group in snaked_target_groups:
+ snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
+ if collect_targets_health:
+ snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
+ target) for target in get_target_group_targets_health(connection, module, snaked_target_group['target_group_arn'])]
+
+ module.exit_json(target_groups=snaked_target_groups)
+
+
+def main():
+
+ argument_spec = dict(
+ load_balancer_arn=dict(type='str'),
+ target_group_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str'),
+ collect_targets_health=dict(default=False, type='bool', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_target_group_facts':
+ module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_target_groups(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
new file mode 100644
index 00000000..a9694428
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_target_group_info
+version_added: 1.0.0
+short_description: Gather information about ELB target groups in AWS
+description:
+ - Gather information about ELB target groups in AWS
+ - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+ load_balancer_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the load balancer.
+ required: false
+ type: str
+ target_group_arns:
+ description:
+ - The Amazon Resource Names (ARN) of the target groups.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the target groups.
+ required: false
+ type: list
+ elements: str
+ collect_targets_health:
+ description:
+ - When set to C(yes), the output contains the health description of the targets.
+ required: false
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all target groups
+ community.aws.elb_target_group_info:
+
+- name: Gather information about the target group attached to a particular ELB
+ community.aws.elb_target_group_info:
+ load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+- name: Gather information about target groups named 'tg1' and 'tg2'
+ community.aws.elb_target_group_info:
+ names:
+ - tg1
+ - tg2
+
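+# An illustrative variation (the ARN below is a placeholder): combining
+# I(target_group_arns) with I(collect_targets_health) also returns each
+# target's health description.
+- name: Gather information about a target group by ARN, including target health
+  community.aws.elb_target_group_info:
+    target_group_arns:
+      - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:targetgroup/mytargetgroup/aabbccddee0044332211"
+    collect_targets_health: true
+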
+'''
+
+RETURN = r'''
+target_groups:
+ description: a list of target groups
+ returned: always
+ type: complex
+ contains:
+ deregistration_delay_timeout_seconds:
+      description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ returned: always
+ type: int
+ sample: 300
+ health_check_interval_seconds:
+ description: The approximate amount of time, in seconds, between health checks of an individual target.
+ returned: always
+ type: int
+ sample: 30
+ health_check_path:
+ description: The destination for the health check request.
+ returned: always
+ type: str
+ sample: /index.html
+ health_check_port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: str
+ sample: traffic-port
+ health_check_protocol:
+ description: The protocol to use to connect with the target.
+ returned: always
+ type: str
+ sample: HTTP
+ health_check_timeout_seconds:
+ description: The amount of time, in seconds, during which no response means a failed health check.
+ returned: always
+ type: int
+ sample: 5
+ healthy_threshold_count:
+ description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
+ returned: always
+ type: int
+ sample: 5
+ load_balancer_arns:
+ description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+ returned: always
+ type: list
+ sample: []
+ matcher:
+ description: The HTTP codes to use when checking for a successful response from a target.
+ returned: always
+ type: dict
+ sample: {
+ "http_code": "200"
+ }
+ port:
+ description: The port on which the targets are listening.
+ returned: always
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol to use for routing traffic to the targets.
+ returned: always
+ type: str
+ sample: HTTP
+ stickiness_enabled:
+ description: Indicates whether sticky sessions are enabled.
+ returned: always
+ type: bool
+ sample: true
+ stickiness_lb_cookie_duration_seconds:
+      description: The time period, in seconds, during which requests from a client should be routed to the same target.
+ returned: always
+ type: int
+ sample: 86400
+ stickiness_type:
+ description: The type of sticky sessions.
+ returned: always
+ type: str
+ sample: lb_cookie
+ tags:
+ description: The tags attached to the target group.
+ returned: always
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: always
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
+ targets_health_description:
+ description: Targets health description.
+ returned: when collect_targets_health is enabled
+ type: complex
+ contains:
+ health_check_port:
+ description: The port to check target health.
+ returned: always
+ type: str
+ sample: '80'
+ target:
+ description: The target metadata.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: The ID of the target.
+ returned: always
+ type: str
+ sample: i-0123456789
+ port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: int
+ sample: 80
+ target_health:
+ description: The target health status.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the target health.
+ returned: always
+ type: str
+ sample: healthy
+ target_group_name:
+ description: The name of the target group.
+ returned: always
+ type: str
+ sample: mytargetgroup
+ unhealthy_threshold_count:
+ description: The number of consecutive health check failures required before considering the target unhealthy.
+ returned: always
+ type: int
+ sample: 2
+ vpc_id:
+ description: The ID of the VPC for the targets.
+ returned: always
+ type: str
+ sample: vpc-0123456
+'''
+
+import traceback
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_target_group_attributes(connection, module, target_group_arn):
+
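+    # Note: describe_target_group_attributes returns its attributes as a list of
+    # Key/Value pairs, the same shape as a boto3 tag list, which is why the
+    # tag-list helper below can convert it into a plain dict.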
+ try:
+ target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe target group attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ return dict((k.replace('.', '_'), v)
+ for (k, v) in target_group_attributes.items())
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe group tags")
+
+
+def get_target_group_targets_health(connection, module, target_group_arn):
+
+ try:
+ return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to get target health")
+
+
+def list_target_groups(connection, module):
+
+ load_balancer_arn = module.params.get("load_balancer_arn")
+ target_group_arns = module.params.get("target_group_arns")
+ names = module.params.get("names")
+ collect_targets_health = module.params.get("collect_targets_health")
+
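+    # At most one of the three filters can be set (enforced via mutually_exclusive
+    # in main()), so at most one filtered paginate call below runs; with no filter,
+    # every target group in the region is listed.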
+ try:
+ target_group_paginator = connection.get_paginator('describe_target_groups')
+ if not load_balancer_arn and not target_group_arns and not names:
+ target_groups = target_group_paginator.paginate().build_full_result()
+ if load_balancer_arn:
+ target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
+ if target_group_arns:
+ target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
+ if names:
+ target_groups = target_group_paginator.paginate(Names=names).build_full_result()
+ except is_boto3_error_code('TargetGroupNotFound'):
+ module.exit_json(target_groups=[])
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list target groups")
+ except NoCredentialsError as e:
+ module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc())
+
+ # Get the attributes and tags for each target group
+ for target_group in target_groups['TargetGroups']:
+ target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
+
+ # Get tags for each target group
+ for snaked_target_group in snaked_target_groups:
+ snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
+ if collect_targets_health:
+ snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
+ target) for target in get_target_group_targets_health(connection, module, snaked_target_group['target_group_arn'])]
+
+ module.exit_json(target_groups=snaked_target_groups)
+
+
+def main():
+
+ argument_spec = dict(
+ load_balancer_arn=dict(type='str'),
+ target_group_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str'),
+ collect_targets_health=dict(default=False, type='bool', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_target_group_facts':
+ module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_target_groups(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_info.py
new file mode 100644
index 00000000..92463233
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/elb_target_info.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com>
+# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+DOCUMENTATION = '''
+---
+module: elb_target_info
+version_added: 1.0.0
+short_description: Gathers which target groups a target is associated with
+description:
+ - This module will search through every target group in a region to find
+ which ones have registered a given instance ID or IP.
+ - This module was called C(elb_target_facts) before Ansible 2.9. The usage did not change.
+
+author: "Yaakov Kuperman (@yaakov-github)"
+options:
+ instance_id:
+ description:
+      - The instance ID to get information for.
+ type: str
+ required: true
+ get_unused_target_groups:
+ description:
+ - Whether or not to get target groups not used by any load balancers.
+ type: bool
+ default: true
+
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = """
+# practical use case - dynamically de-registering and re-registering nodes
+
+ - name: Get EC2 Metadata
+ amazon.aws.ec2_metadata_facts:
+
+ - name: Get initial list of target groups
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+
+ - name: save fact for later
+ ansible.builtin.set_fact:
+ original_tgs: "{{ target_info.instance_target_groups }}"
+
+ - name: Deregister instance from all target groups
+ delegate_to: localhost
+ community.aws.elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ region: "{{ ansible_ec2_placement_region }}"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # This avoids having to wait for 'elb_target' to serially deregister each
+ # target group. An alternative would be to run all of the 'elb_target'
+ # tasks async and wait for them to finish.
+
+ - name: wait for all targets to deregister simultaneously
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+ - name: reregister in elbv2s
+ community.aws.elb_target:
+ region: "{{ ansible_ec2_placement_region }}"
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # wait until all groups associated with this instance are 'healthy' or
+ # 'unused'
+ - name: wait for registration
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
+
+# using the target groups to generate AWS CLI commands to reregister the
+# instance - useful in case the playbook fails mid-run and manual
+# rollback is required
+ - name: "reregistration commands: ELBv2s"
+ ansible.builtin.debug:
+ msg: >
+ aws --region {{ansible_ec2_placement_region}} elbv2
+ register-targets --target-group-arn {{item.target_group_arn}}
+ --targets{%for target in item.targets%}
+ Id={{target.target_id}},
+ Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
+ {%endif%}
+ {%endfor%}
+ loop: "{{target_info.instance_target_groups}}"
+
+"""
+
+RETURN = """
+instance_target_groups:
+  description: A list of target groups to which the instance is registered.
+ returned: always
+ type: complex
+ contains:
+ target_group_arn:
+ description: The ARN of the target group
+ type: str
+ returned: always
+ sample:
+ - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef"
+ target_group_type:
+ description: Which target type is used for this group
+ returned: always
+ type: str
+ sample:
+ - ip
+ - instance
+ targets:
+ description: A list of targets that point to this instance ID
+ returned: always
+ type: complex
+ contains:
+ target_id:
+ description: the target ID referring to this instance
+ type: str
+ returned: always
+ sample:
+ - i-deadbeef
+ - 1.2.3.4
+ target_port:
+ description: which port this target is listening on
+ type: str
+ returned: always
+ sample:
+ - 80
+ target_az:
+ description: which availability zone is explicitly
+ associated with this target
+ type: str
+ returned: when an AZ is associated with this instance
+ sample:
+ - us-west-2a
+ target_health:
+ description:
+ - The target health description.
+ - See following link for all the possible values
+ U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health)
+ returned: always
+ type: complex
+ contains:
+ description:
+ description: description of target health
+            returned: if I(state!=healthy)
+            sample:
+              - "Target deregistration is in progress"
+ type: str
+ reason:
+ description: reason code for target health
+ returned: if I(state!=healthy)
+ sample:
+ - "Target.Deregistration in progress"
+ type: str
+ state:
+ description: health state
+ returned: always
+ sample:
+ - "healthy"
+ - "draining"
+ - "initial"
+ - "unhealthy"
+ - "unused"
+ - "unavailable"
+ type: str
+"""
+
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # we can handle the lack of boto3 based on the ec2 module
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+class Target(object):
+ """Models a target in a target group"""
+ def __init__(self, target_id, port, az, raw_target_health):
+ self.target_port = port
+ self.target_id = target_id
+ self.target_az = az
+ self.target_health = self.convert_target_health(raw_target_health)
+
+ def convert_target_health(self, raw_target_health):
+ return camel_dict_to_snake_dict(raw_target_health)
+
+
+class TargetGroup(object):
+ """Models an elbv2 target group"""
+
+ def __init__(self, **kwargs):
+ self.target_group_type = kwargs["target_group_type"]
+ self.target_group_arn = kwargs["target_group_arn"]
+ # the relevant targets associated with this group
+ self.targets = []
+
+ def add_target(self, target_id, target_port, target_az, raw_target_health):
+ self.targets.append(Target(target_id,
+ target_port,
+ target_az,
+ raw_target_health))
+
+ def to_dict(self):
+ object_dict = vars(self)
+ object_dict["targets"] = [vars(each) for each in self.get_targets()]
+ return object_dict
+
+ def get_targets(self):
+ return list(self.targets)
+
+
+class TargetInfoGatherer(object):
+
+ def __init__(self, module, instance_id, get_unused_target_groups):
+ self.module = module
+ try:
+ self.ec2 = self.module.client(
+ "ec2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e,
+ msg="Couldn't connect to ec2"
+ )
+
+ try:
+ self.elbv2 = self.module.client(
+ "elbv2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not connect to elbv2"
+ )
+
+ self.instance_id = instance_id
+ self.get_unused_target_groups = get_unused_target_groups
+ self.tgs = self._get_target_groups()
+
+ def _get_instance_ips(self):
+ """Fetch all IPs associated with this instance so that we can determine
+ whether or not an instance is in an IP-based target group"""
+ try:
+ # get ahold of the instance in the API
+ reservations = self.ec2.describe_instances(
+ InstanceIds=[self.instance_id],
+ aws_retry=True
+ )["Reservations"]
+ except (BotoCoreError, ClientError) as e:
+ # typically this will happen if the instance doesn't exist
+ self.module.fail_json_aws(e,
+ msg="Could not get instance info" +
+ " for instance '%s'" %
+ (self.instance_id)
+ )
+
+ if len(reservations) < 1:
+ self.module.fail_json(
+ msg="Instance ID %s could not be found" % self.instance_id
+ )
+
+ instance = reservations[0]["Instances"][0]
+
+ # IPs are represented in a few places in the API, this should
+ # account for all of them
+ ips = set()
+ ips.add(instance["PrivateIpAddress"])
+ for nic in instance["NetworkInterfaces"]:
+ ips.add(nic["PrivateIpAddress"])
+ for ip in nic["PrivateIpAddresses"]:
+ ips.add(ip["PrivateIpAddress"])
+
+ return list(ips)
+
+ def _get_target_group_objects(self):
+ """helper function to build a list of TargetGroup objects based on
+ the AWS API"""
+ try:
+ paginator = self.elbv2.get_paginator(
+ "describe_target_groups"
+ )
+ tg_response = paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target" +
+ " groups"
+ )
+
+ # build list of TargetGroup objects representing every target group in
+ # the system
+ target_groups = []
+ for each_tg in tg_response["TargetGroups"]:
+ if not self.get_unused_target_groups and \
+ len(each_tg["LoadBalancerArns"]) < 1:
+ # only collect target groups that actually are connected
+ # to LBs
+ continue
+
+ target_groups.append(
+ TargetGroup(target_group_arn=each_tg["TargetGroupArn"],
+ target_group_type=each_tg["TargetType"],
+ )
+ )
+ return target_groups
+
+ def _get_target_descriptions(self, target_groups):
+ """Helper function to build a list of all the target descriptions
+ for this target in a target group"""
+ # Build a list of all the target groups pointing to this instance
+ # based on the previous list
+ tgs = set()
+ # Loop through all the target groups
+ for tg in target_groups:
+ try:
+ # Get the list of targets for that target group
+ response = self.elbv2.describe_target_health(
+ TargetGroupArn=tg.target_group_arn,
+ aws_retry=True
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target " +
+ "health for target group %s" %
+ tg.target_group_arn
+ )
+
+ for t in response["TargetHealthDescriptions"]:
+ # If the target group has this instance as a target, add to
+ # list. This logic also accounts for the possibility of a
+ # target being in the target group multiple times with
+ # overridden ports
+ if t["Target"]["Id"] == self.instance_id or \
+ t["Target"]["Id"] in self.instance_ips:
+
+ # The 'AvailabilityZone' parameter is a weird one, see the
+ # API docs for more. Basically it's only supposed to be
+ # there under very specific circumstances, so we need
+ # to account for that
+ az = t["Target"]["AvailabilityZone"] \
+ if "AvailabilityZone" in t["Target"] \
+ else None
+
+ tg.add_target(t["Target"]["Id"],
+ t["Target"]["Port"],
+ az,
+ t["TargetHealth"])
+ # since tgs is a set, each target group will be added only
+ # once, even though we call add on each successful match
+ tgs.add(tg)
+ return list(tgs)
+
+ def _get_target_groups(self):
+ # do this first since we need the IPs later on in this function
+ self.instance_ips = self._get_instance_ips()
+
+ # build list of target groups
+ target_groups = self._get_target_group_objects()
+ return self._get_target_descriptions(target_groups)
+
+
+def main():
+ argument_spec = dict(
+ instance_id={"required": True, "type": "str"},
+ get_unused_target_groups={"required": False,
+ "default": True, "type": "bool"}
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if module._name == 'elb_target_facts':
+ module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", date='2021-12-01', collection_name='community.aws')
+
+ instance_id = module.params["instance_id"]
+ get_unused_target_groups = module.params["get_unused_target_groups"]
+
+ tg_gatherer = TargetInfoGatherer(module,
+ instance_id,
+ get_unused_target_groups
+ )
+
+ instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs]
+
+ module.exit_json(instance_target_groups=instance_target_groups)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/execute_lambda.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/execute_lambda.py
new file mode 100644
index 00000000..ca97f661
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/execute_lambda.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: execute_lambda
+version_added: 1.0.0
+short_description: Execute an AWS Lambda function
+description:
+ - This module executes AWS Lambda functions, allowing synchronous and asynchronous
+ invocation.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - Async invocation will always return an empty C(output) key.
+ - Synchronous invocation may result in a function timeout, resulting in an
+ empty C(output) key.
+options:
+ name:
+ description:
+ - The name of the function to be invoked. This can only be used for
+ invocations within the calling account. To invoke a function in another
+ account, use I(function_arn) to specify the full ARN.
+ type: str
+ function_arn:
+ description:
+      - The ARN of the function to be invoked.
+ type: str
+ tail_log:
+ description:
+ - If I(tail_log=yes), the result of the task will include the last 4 KB
+ of the CloudWatch log for the function execution. Log tailing only
+ works if you use synchronous invocation I(wait=yes). This is usually
+ used for development or testing Lambdas.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Whether to wait for the function results or not. If I(wait=no)
+ the task will not return any results. To wait for the Lambda function
+ to complete, set I(wait=yes) and the result will be available in the
+ I(output) key.
+ type: bool
+ default: true
+ dry_run:
+ description:
+ - Do not *actually* invoke the function. A C(DryRun) call will check that
+ the caller has permissions to call the function, especially for
+ checking cross-account permissions.
+ type: bool
+ default: false
+ version_qualifier:
+ description:
+ - Which version/alias of the function to run. This defaults to the
+ C(LATEST) revision, but can be set to any existing version or alias.
+ See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
+ for details.
+ type: str
+ payload:
+ description:
+ - A dictionary in any form to be provided as input to the Lambda function.
+ default: {}
+ type: dict
+'''
+
+EXAMPLES = '''
+- community.aws.execute_lambda:
+ name: test-function
+ # the payload is automatically serialized and sent to the function
+ payload:
+ foo: bar
+ value: 8
+ register: response
+
+# Test that you have sufficient permissions to execute a Lambda function in
+# another account
+- community.aws.execute_lambda:
+ function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
+ dry_run: true
+
+- community.aws.execute_lambda:
+ name: test-function
+ payload:
+ foo: bar
+ value: 8
+ wait: true
+ tail_log: true
+ register: response
+ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda
+
+# Pass the Lambda event payload as a json file.
+- community.aws.execute_lambda:
+ name: test-function
+ payload: "{{ lookup('file','lambda_event.json') }}"
+ register: response
+
+- community.aws.execute_lambda:
+ name: test-function
+ version_qualifier: PRODUCTION
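+
+# A minimal fire-and-forget sketch: with wait set to false the task returns no
+# function output, only the 202 status code from the asynchronous invocation.
+- community.aws.execute_lambda:
+    name: test-function
+    wait: false
+    payload:
+      foo: bar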
+'''
+
+RETURN = '''
+output:
+ description: Function output if wait=true and the function returns a value
+ returned: success
+ type: dict
+ sample: "{ 'output': 'something' }"
+logs:
+ description: The last 4KB of the function logs. Only provided if I(tail_log) is true
+ type: str
+ returned: if I(tail_log) == true
+status:
+ description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
+ type: int
+ sample: 200
+ returned: always
+'''
+
+import base64
+import json
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ function_arn=dict(),
+ wait=dict(default=True, type='bool'),
+ tail_log=dict(default=False, type='bool'),
+ dry_run=dict(default=False, type='bool'),
+ version_qualifier=dict(),
+ payload=dict(default={}, type='dict'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'function_arn'],
+ ]
+ )
+
+ name = module.params.get('name')
+ function_arn = module.params.get('function_arn')
+ await_return = module.params.get('wait')
+ dry_run = module.params.get('dry_run')
+ tail_log = module.params.get('tail_log')
+ version_qualifier = module.params.get('version_qualifier')
+ payload = module.params.get('payload')
+
+ if not (name or function_arn):
+ module.fail_json(msg="Must provide either a function_arn or a name to invoke.")
+
+ try:
+ client = module.client('lambda')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invoke_params = {}
+
+ if await_return:
+ # await response
+ invoke_params['InvocationType'] = 'RequestResponse'
+ else:
+ # fire and forget
+ invoke_params['InvocationType'] = 'Event'
+ if dry_run or module.check_mode:
+ # dry_run overrides invocation type
+ invoke_params['InvocationType'] = 'DryRun'
+
+ if tail_log and await_return:
+ invoke_params['LogType'] = 'Tail'
+ elif tail_log and not await_return:
+ module.fail_json(msg="The `tail_log` parameter is only available if "
+ "the invocation waits for the function to complete. "
+ "Set `wait` to true or turn off `tail_log`.")
+ else:
+ invoke_params['LogType'] = 'None'
+
+ if version_qualifier:
+ invoke_params['Qualifier'] = version_qualifier
+
+ if payload:
+ invoke_params['Payload'] = json.dumps(payload)
+
+ if function_arn:
+ invoke_params['FunctionName'] = function_arn
+ elif name:
+ invoke_params['FunctionName'] = name
+
+ try:
+ response = client.invoke(**invoke_params)
+ except botocore.exceptions.ClientError as ce:
+ if ce.response['Error']['Code'] == 'ResourceNotFoundException':
+ module.fail_json(msg="Could not find Lambda to execute. Make sure "
+ "the ARN is correct and your profile has "
+ "permissions to execute this function.",
+ exception=traceback.format_exc())
+ module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
+ exception=traceback.format_exc())
+ except botocore.exceptions.ParamValidationError as ve:
+ module.fail_json(msg="Parameters to `invoke` failed to validate",
+ exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg="Unexpected failure while invoking Lambda function",
+ exception=traceback.format_exc())
+
+ results = {
+ 'logs': '',
+ 'status': response['StatusCode'],
+ 'output': '',
+ }
+
+ if response.get('LogResult'):
+ try:
+ # logs are base64 encoded in the API response
+            results['logs'] = base64.b64decode(response.get('LogResult', '')).decode('utf-8', errors='replace')
+ except Exception as e:
+ module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())
+
+ if invoke_params['InvocationType'] == 'RequestResponse':
+ try:
+ results['output'] = json.loads(response['Payload'].read().decode('utf8'))
+ except Exception as e:
+ module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
+
+ if isinstance(results.get('output'), dict) and any(
+ [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
+ # AWS sends back stack traces and error messages when a function failed
+ # in a RequestResponse (synchronous) context.
+ template = ("Function executed, but there was an error in the Lambda function. "
+ "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
+ error_data = {
+ # format the stacktrace sent back as an array into a multiline string
+ 'trace': '\n'.join(
+ [' '.join([
+ str(x) for x in line # cast line numbers to strings
+ ]) for line in results.get('output', {}).get('stackTrace', [])]
+ ),
+ 'errmsg': results['output'].get('errorMessage'),
+ 'type': results['output'].get('errorType')
+ }
+ module.fail_json(msg=template.format(**error_data), result=results)
+
+ module.exit_json(changed=True, result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam.py
new file mode 100644
index 00000000..823bfb89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam.py
@@ -0,0 +1,869 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam
+version_added: 1.0.0
+short_description: Manage IAM users, groups, roles and keys
+description:
+  - Allows for the management of IAM users, user API keys, groups, and roles.
+options:
+ iam_type:
+ description:
+ - Type of IAM resource.
+ choices: ["user", "group", "role"]
+ type: str
+ required: true
+ name:
+ description:
+ - Name of IAM resource to create or identify.
+ required: true
+ type: str
+ new_name:
+ description:
+ - When I(state=update), will replace I(name) with I(new_name) on IAM resource.
+ type: str
+ new_path:
+ description:
+      - When I(state=update), will replace the path with I(new_path) on the IAM resource.
+ type: str
+ state:
+ description:
+ - Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
+ required: true
+ choices: [ "present", "absent", "update" ]
+ type: str
+ path:
+ description:
+ - When creating or updating, specify the desired path of the resource.
+ - If I(state=present), it will replace the current path to match what is passed in when they do not match.
+ default: "/"
+ type: str
+ trust_policy:
+ description:
+ - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role.
+ - Mutually exclusive with I(trust_policy_filepath).
+ type: dict
+ trust_policy_filepath:
+ description:
+ - The path to the trust policy document that grants an entity permission to assume the role.
+ - Mutually exclusive with I(trust_policy).
+ type: str
+ access_key_state:
+ description:
+      - When I(iam_type=user), it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
+ choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"]
+ type: str
+ key_count:
+ description:
+ - When I(access_key_state=create) it will ensure this quantity of keys are present.
+ default: 1
+ type: int
+ access_key_ids:
+ description:
+ - A list of the keys that you want affected by the I(access_key_state) parameter.
+ type: list
+ elements: str
+ groups:
+ description:
+ - A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed.
+ type: list
+ elements: str
+ password:
+ description:
+ - When I(type=user) and either I(state=present) or I(state=update), define the users login password.
+ - Note that this will always return 'changed'.
+ type: str
+ update_password:
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - When to update user passwords.
+ - I(update_password=always) will ensure the password is set to I(password).
+ - I(update_password=on_create) will only set the password for newly created users.
+ type: str
+notes:
+ - 'Currently boto does not support the removal of Managed Policies, the module will error out if your
+ user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
+author:
+ - "Jonathan I. Davila (@defionscode)"
+ - "Paul Seiffert (@seiffert)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Basic user creation example
+- name: Create two new IAM users with API keys
+ community.aws.iam:
+ iam_type: user
+ name: "{{ item }}"
+ state: present
+ password: "{{ temp_pass }}"
+ access_key_state: create
+ loop:
+ - jcleese
+ - mpython
+
+# Advanced example, create two new groups and add the pre-existing user
+# jdavila to both groups.
+- name: Create Two Groups, Mario and Luigi
+ community.aws.iam:
+ iam_type: group
+ name: "{{ item }}"
+ state: present
+ loop:
+ - Mario
+ - Luigi
+ register: new_groups
+
+- name: Update user
+ community.aws.iam:
+ iam_type: user
+ name: jdavila
+ state: update
+ groups: "{{ item.created_group.group_name }}"
+ loop: "{{ new_groups.results }}"
+
+# Example of role with custom trust policy for Lambda service
+- name: Create IAM role with custom trust relationship
+ community.aws.iam:
+ iam_type: role
+ name: AAALambdaTestRole
+ state: present
+ trust_policy:
+ Version: '2012-10-17'
+ Statement:
+ - Action: sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+
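+# Illustrative only; the key IDs below are placeholders. Deactivate specific
+# access keys for an existing user via the access_key_state and
+# access_key_ids options documented above.
+- name: Deactivate two of a user's access keys
+  community.aws.iam:
+    iam_type: user
+    name: jdavila
+    state: update
+    access_key_state: inactive
+    access_key_ids:
+      - AKIA1111111111111111
+      - AKIA2222222222222222
+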
+'''
+RETURN = r'''
+role_result:
+ description: the IAM.role dict returned by Boto
+ type: str
+ returned: if iam_type=role and state=present
+ sample: {
+ "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
+ "assume_role_policy_document": "...truncated...",
+ "create_date": "2017-09-02T14:32:23Z",
+ "path": "/",
+ "role_id": "AROAA1B2C3D4E5F6G7H8I",
+ "role_name": "my-new-role"
+ }
+roles:
+ description: a list containing the name of the currently defined roles
+ type: list
+ returned: if iam_type=role and state=present
+ sample: [
+ "my-new-role",
+ "my-existing-role-1",
+ "my-existing-role-2",
+ "my-existing-role-3",
+ "my-existing-role-...",
+ ]
+'''
+
+import json
+import traceback
+
+try:
+ import boto.exception
+ import boto.iam
+ import boto.iam.connection
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+def _paginate(func, attr):
+ '''
+ paginates the results from func by continuously passing in
+ the returned marker if the results were truncated. this returns
+ an iterator over the items in the returned response. `attr` is
+ the name of the attribute to iterate over in the response.
+ '''
+ finished, marker = False, None
+ while not finished:
+ res = func(marker=marker)
+ for item in getattr(res, attr):
+ yield item
+
+ finished = res.is_truncated == 'false'
+ if not finished:
+ marker = res.marker
+
+
+def list_all_groups(iam):
+ return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
+
+
+def list_all_users(iam):
+ return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
+
+
+def list_all_roles(iam):
+ return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
+
+
+def list_all_instance_profiles(iam):
+ return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
+
+
+def create_user(module, iam, name, pwd, path, key_state, key_count):
+ key_qty = 0
+ keys = []
+ try:
+ user_meta = iam.create_user(
+ name, path).create_user_response.create_user_result.user
+ changed = True
+ if pwd is not None:
+ pwd = iam.create_login_profile(name, pwd)
+ if key_state in ['create']:
+ if key_count:
+ while key_count > key_qty:
+ keys.append(iam.create_access_key(
+ user_name=name).create_access_key_response.
+ create_access_key_result.
+ access_key)
+ key_qty += 1
+ else:
+ keys = None
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=False, msg=str(err))
+ else:
+ user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
+ return (user_info, changed)
+
+
+def delete_dependencies_first(module, iam, name):
+ changed = False
+ # try to delete any keys
+ try:
+ current_keys = [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ for key in current_keys:
+ iam.delete_access_key(key, name)
+ changed = True
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
+
+ # try to delete login profiles
+ try:
+ login_profile = iam.get_login_profiles(name).get_login_profile_response
+ iam.delete_login_profile(name)
+ changed = True
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
+ module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
+
+ # try to detach policies
+ try:
+ for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
+ iam.delete_user_policy(name, policy)
+ changed = True
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if 'must detach all policies first' in error_msg:
+ module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
+ "that %s has Managed Polices. This is not "
+ "currently supported by boto. Please detach the policies "
+ "through the console and try again." % name)
+ module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
+
+ # try to deactivate associated MFA devices
+ try:
+ mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
+ for device in mfa_devices:
+ iam.deactivate_mfa_device(name, device['serial_number'])
+ changed = True
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
+
+ return changed
+
+
+def delete_user(module, iam, name):
+ changed = delete_dependencies_first(module, iam, name)
+ try:
+ iam.delete_user(name)
+ except boto.exception.BotoServerError as ex:
+ module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
+ else:
+ changed = True
+ return name, changed
+
+
+def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
+ changed = False
+ name_change = False
+ if updated and new_name:
+ name = new_name
+ try:
+ current_keys = [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ status = [ck['status'] for ck in
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ key_qty = len(current_keys)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if 'cannot be found' in error_msg and updated:
+ current_keys = [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+ status = [ck['status'] for ck in
+ iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+ name = new_name
+ else:
+ module.fail_json(changed=False, msg=str(err))
+
+ updated_key_list = {}
+
+ if new_name or new_path:
+ c_path = iam.get_user(name).get_user_result.user['path']
+ if (name != new_name) or (c_path != new_path):
+ changed = True
+ try:
+ if not updated:
+ user = iam.update_user(
+ name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
+ else:
+ user = iam.update_user(
+ name, new_path=new_path).update_user_response.response_metadata
+ user['updates'] = dict(
+ old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ module.fail_json(changed=False, msg=str(err))
+ else:
+ if not updated:
+ name_change = True
+
+ if pwd:
+ try:
+ iam.update_login_profile(name, pwd)
+ changed = True
+ except boto.exception.BotoServerError:
+ try:
+ iam.create_login_profile(name, pwd)
+ changed = True
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(str(err))
+ if 'Password does not conform to the account password policy' in error_msg:
+ module.fail_json(changed=False, msg="Password doesn't conform to policy")
+ else:
+ module.fail_json(msg=error_msg)
+
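+    # The access keys are listed a second time because the rename or password
+    # work above may have moved them under the new user name; key_qty must
+    # reflect the current state before any keys are created below.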
+ try:
+ current_keys = [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ status = [ck['status'] for ck in
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ key_qty = len(current_keys)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if 'cannot be found' in error_msg and updated:
+ current_keys = [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+ status = [ck['status'] for ck in
+ iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+ name = new_name
+ else:
+ module.fail_json(changed=False, msg=str(err))
+
+ new_keys = []
+ if key_state == 'create':
+ try:
+ while key_count > key_qty:
+ new_keys.append(iam.create_access_key(
+ user_name=name).create_access_key_response.create_access_key_result.access_key)
+ key_qty += 1
+ changed = True
+
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=False, msg=str(err))
+
+ if keys and key_state:
+ for access_key in keys:
+ if key_state in ('active', 'inactive'):
+ if access_key in current_keys:
+ for current_key, current_key_state in zip(current_keys, status):
+ if key_state != current_key_state.lower():
+ try:
+ iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
+ changed = True
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=False, msg=str(err))
+ else:
+ module.fail_json(msg="Supplied keys not found for %s. "
+ "Current keys: %s. "
+ "Supplied key(s): %s" %
+ (name, current_keys, keys)
+ )
+
+ if key_state == 'remove':
+ if access_key in current_keys:
+ try:
+ iam.delete_access_key(access_key, user_name=name)
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=False, msg=str(err))
+ else:
+ changed = True
+
+ try:
+ final_keys, final_key_status = \
+ [ck['access_key_id'] for ck in
+ iam.get_all_access_keys(name).
+ list_access_keys_result.
+ access_key_metadata],\
+ [ck['status'] for ck in
+ iam.get_all_access_keys(name).
+ list_access_keys_result.
+ access_key_metadata]
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+
+ for fk, fks in zip(final_keys, final_key_status):
+ updated_key_list.update({fk: fks})
+
+ return name_change, updated_key_list, changed, new_keys
+
+
+def set_users_groups(module, iam, name, groups, updated=None,
+ new_name=None):
+ """ Sets groups for a user, will purge groups not explicitly passed, while
+ retaining pre-existing groups that also are in the new list.
+ """
+ changed = False
+
+ if updated:
+ name = new_name
+
+ try:
+ orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
+ name).list_groups_for_user_result.groups]
+ remove_groups = [
+ rg for rg in frozenset(orig_users_groups).difference(groups)]
+ new_groups = [
+ ng for ng in frozenset(groups).difference(orig_users_groups)]
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+ else:
+ if len(orig_users_groups) > 0:
+ for new in new_groups:
+ iam.add_user_to_group(new, name)
+ for rm in remove_groups:
+ iam.remove_user_from_group(rm, name)
+ else:
+ for group in groups:
+ try:
+ iam.add_user_to_group(group, name)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if ('The group with name %s cannot be found.' % group) in error_msg:
+ module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
+
+ if len(remove_groups) > 0 or len(new_groups) > 0:
+ changed = True
+
+ return (groups, changed)
+
+
+def create_group(module=None, iam=None, name=None, path=None):
+ changed = False
+ try:
+ iam.create_group(
+ name, path).create_group_response.create_group_result.group
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+ else:
+ changed = True
+ return name, changed
+
+
+def delete_group(module=None, iam=None, name=None):
+ changed = False
+ try:
+ iam.delete_group(name)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if ('must delete policies first') in error_msg:
+ for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
+ iam.delete_group_policy(name, policy)
+ try:
+ iam.delete_group(name)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if ('must delete policies first') in error_msg:
+ module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
+ "that %s has Managed Polices. This is not "
+ "currently supported by boto. Please detach the policies "
+ "through the console and try again." % name)
+ else:
+ module.fail_json(changed=changed, msg=str(error_msg))
+ else:
+ changed = True
+ else:
+ module.fail_json(changed=changed, msg=str(error_msg))
+ else:
+ changed = True
+ return changed, name
+
+
+def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
+ changed = False
+ try:
+ current_group_path = iam.get_group(
+ name).get_group_response.get_group_result.group['path']
+ if new_path:
+ if current_group_path != new_path:
+ iam.update_group(name, new_path=new_path)
+ changed = True
+ if new_name:
+ if name != new_name:
+ iam.update_group(name, new_group_name=new_name, new_path=new_path)
+ changed = True
+ name = new_name
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+
+ return changed, name, new_path, current_group_path
+
+
+def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
+ changed = False
+ iam_role_result = None
+ instance_profile_result = None
+ try:
+ if name not in role_list:
+ changed = True
+ iam_role_result = iam.create_role(name,
+ assume_role_policy_document=trust_policy_doc,
+ path=path).create_role_response.create_role_result.role
+
+ if name not in prof_list:
+ instance_profile_result = iam.create_instance_profile(name, path=path) \
+ .create_instance_profile_response.create_instance_profile_result.instance_profile
+ iam.add_role_to_instance_profile(name, name)
+ else:
+ instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+ else:
+ updated_role_list = list_all_roles(iam)
+ iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
+ return changed, updated_role_list, iam_role_result, instance_profile_result
+
+
+def delete_role(module, iam, name, role_list, prof_list):
+ changed = False
+ iam_role_result = None
+ instance_profile_result = None
+ try:
+ if name in role_list:
+ cur_ins_prof = [rp['instance_profile_name'] for rp in
+ iam.list_instance_profiles_for_role(name).
+ list_instance_profiles_for_role_result.
+ instance_profiles]
+ for profile in cur_ins_prof:
+ iam.remove_role_from_instance_profile(profile, name)
+ try:
+ iam.delete_role(name)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if ('must detach all policies first') in error_msg:
+ for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
+ iam.delete_role_policy(name, policy)
+ try:
+ iam_role_result = iam.delete_role(name)
+ except boto.exception.BotoServerError as err:
+ error_msg = boto_exception(err)
+ if ('must detach all policies first') in error_msg:
+ module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
+ "that %s has Managed Polices. This is not "
+ "currently supported by boto. Please detach the policies "
+ "through the console and try again." % name)
+ else:
+ module.fail_json(changed=changed, msg=str(err))
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ for prof in prof_list:
+ if name == prof:
+ instance_profile_result = iam.delete_instance_profile(name)
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err))
+ else:
+ updated_role_list = list_all_roles(iam)
+ return changed, updated_role_list, iam_role_result, instance_profile_result
+
+
+def main():
+ argument_spec = dict(
+ iam_type=dict(required=True, choices=['user', 'group', 'role']),
+ groups=dict(type='list', default=None, required=False, elements='str'),
+ state=dict(required=True, choices=['present', 'absent', 'update']),
+ password=dict(default=None, required=False, no_log=True),
+ # setting no_log=False on update_password avoids a false positive warning about not setting no_log
+ update_password=dict(default='always', required=False, choices=['always', 'on_create'], no_log=False),
+ access_key_state=dict(default=None, required=False, choices=[
+ 'active', 'inactive', 'create', 'remove',
+ 'Active', 'Inactive', 'Create', 'Remove']),
+ access_key_ids=dict(type='list', default=None, required=False, elements='str'),
+ key_count=dict(type='int', default=1, required=False),
+ name=dict(required=True),
+ trust_policy_filepath=dict(default=None, required=False),
+ trust_policy=dict(type='dict', default=None, required=False),
+ new_name=dict(default=None, required=False),
+ path=dict(default='/', required=False),
+ new_path=dict(default=None, required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='This module requires boto, please install it')
+
+ state = module.params.get('state').lower()
+ iam_type = module.params.get('iam_type').lower()
+ groups = module.params.get('groups')
+ name = module.params.get('name')
+ new_name = module.params.get('new_name')
+ password = module.params.get('password')
+ update_pw = module.params.get('update_password')
+ path = module.params.get('path')
+ new_path = module.params.get('new_path')
+ key_count = module.params.get('key_count')
+ key_state = module.params.get('access_key_state')
+ trust_policy = module.params.get('trust_policy')
+ trust_policy_filepath = module.params.get('trust_policy_filepath')
+ key_ids = module.params.get('access_key_ids')
+
+ if key_state:
+ key_state = key_state.lower()
+ if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
+ module.fail_json(changed=False, msg="At least one access key has to be defined in order"
+ " to use 'active' or 'inactive'")
+
+ if iam_type == 'user' and module.params.get('password') is not None:
+ pwd = module.params.get('password')
+ elif iam_type != 'user' and module.params.get('password') is not None:
+ module.fail_json(msg="a password is being specified when the iam_type "
+ "is not user. Check parameters")
+ else:
+ pwd = None
+
+    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
+                               module.params.get('access_key_ids') is not None):
+ module.fail_json(msg="the IAM type must be user, when IAM access keys "
+ "are being modified. Check parameters")
+
+ if iam_type == 'role' and state == 'update':
+ module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
+ "please specify present or absent")
+
+ # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
+ if trust_policy_filepath:
+ try:
+ with open(trust_policy_filepath, 'r') as json_data:
+ trust_policy_doc = json.dumps(json.load(json_data))
+ except Exception as e:
+ module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
+ elif trust_policy:
+ try:
+ trust_policy_doc = json.dumps(trust_policy)
+ except Exception as e:
+            module.fail_json(msg=str(e) + ': ' + str(trust_policy))
+ else:
+ trust_policy_doc = None
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ try:
+ if region:
+ iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
+ else:
+ iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ result = {}
+ changed = False
+
+ try:
+ orig_group_list = list_all_groups(iam)
+
+ orig_user_list = list_all_users(iam)
+
+ orig_role_list = list_all_roles(iam)
+
+ orig_prof_list = list_all_instance_profiles(iam)
+ except boto.exception.BotoServerError as err:
+ module.fail_json(msg=err.message)
+
+ if iam_type == 'user':
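+        # been_updated tracks whether the user already exists under new_name,
+        # i.e. a previous run already performed the rename.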
+ been_updated = False
+ user_groups = None
+ user_exists = any([n in [name, new_name] for n in orig_user_list])
+ if user_exists:
+ current_path = iam.get_user(name).get_user_result.user['path']
+ if not new_path and current_path != path:
+ new_path = path
+ path = current_path
+
+ if state == 'present' and not user_exists and not new_name:
+ (meta, changed) = create_user(
+ module, iam, name, password, path, key_state, key_count)
+ keys = iam.get_all_access_keys(name).list_access_keys_result.\
+ access_key_metadata
+ if groups:
+ (user_groups, changed) = set_users_groups(
+ module, iam, name, groups, been_updated, new_name)
+ module.exit_json(
+ user_meta=meta, groups=user_groups, keys=keys, changed=changed)
+
+ elif state in ['present', 'update'] and user_exists:
+ if update_pw == 'on_create':
+ password = None
+ if name not in orig_user_list and new_name in orig_user_list:
+ been_updated = True
+ name_change, key_list, user_changed, new_key = update_user(
+ module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
+ if new_key:
+ user_meta = {'access_keys': list(new_key)}
+ user_meta['access_keys'].extend(
+ [{'access_key_id': key, 'status': value} for key, value in key_list.items() if
+ key not in [it['access_key_id'] for it in new_key]])
+ else:
+ user_meta = {
+ 'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
+
+ if name_change and new_name:
+ orig_name = name
+ name = new_name
+ if isinstance(groups, list):
+ user_groups, groups_changed = set_users_groups(
+ module, iam, name, groups, been_updated, new_name)
+ if groups_changed == user_changed:
+ changed = groups_changed
+ else:
+ changed = True
+ else:
+ changed = user_changed
+ if new_name and new_path:
+ module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
+ new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
+ created_keys=new_key, user_meta=user_meta)
+ elif new_name and not new_path and not been_updated:
+ module.exit_json(
+ changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
+ created_keys=new_key, user_meta=user_meta)
+ elif new_name and not new_path and been_updated:
+ module.exit_json(
+ changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
+ created_keys=new_key, user_meta=user_meta)
+ elif not new_name and new_path:
+ module.exit_json(
+ changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
+ keys=key_list, created_keys=new_key, user_meta=user_meta)
+ else:
+ module.exit_json(
+ changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
+ user_meta=user_meta)
+
+ elif state == 'update' and not user_exists:
+ module.fail_json(
+ msg="The user %s does not exist. No update made." % name)
+
+ elif state == 'absent':
+ if user_exists:
+ try:
+ set_users_groups(module, iam, name, '')
+ name, changed = delete_user(module, iam, name)
+ module.exit_json(deleted_user=name, changed=changed)
+
+ except Exception as ex:
+ module.fail_json(changed=changed, msg=str(ex))
+ else:
+ module.exit_json(
+ changed=False, msg="User %s is already absent from your AWS IAM users" % name)
+
+ elif iam_type == 'group':
+ group_exists = name in orig_group_list
+
+ if state == 'present' and not group_exists:
+ new_group, changed = create_group(module=module, iam=iam, name=name, path=path)
+ module.exit_json(changed=changed, group_name=new_group)
+ elif state in ['present', 'update'] and group_exists:
+ changed, updated_name, updated_path, cur_path = update_group(
+ module=module, iam=iam, name=name, new_name=new_name,
+ new_path=new_path)
+
+ if new_path and new_name:
+ module.exit_json(changed=changed, old_group_name=name,
+ new_group_name=updated_name, old_path=cur_path,
+ new_group_path=updated_path)
+
+ if new_path and not new_name:
+ module.exit_json(changed=changed, group_name=name,
+ old_path=cur_path,
+ new_group_path=updated_path)
+
+ if not new_path and new_name:
+ module.exit_json(changed=changed, old_group_name=name,
+ new_group_name=updated_name, group_path=cur_path)
+
+ if not new_path and not new_name:
+ module.exit_json(
+ changed=changed, group_name=name, group_path=cur_path)
+
+ elif state == 'update' and not group_exists:
+ module.fail_json(
+ changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
+
+ elif state == 'absent':
+ if name in orig_group_list:
+ removed_group, changed = delete_group(module=module, iam=iam, name=name)
+ module.exit_json(changed=changed, delete_group=removed_group)
+ else:
+ module.exit_json(changed=changed, msg="Group already absent")
+
+ elif iam_type == 'role':
+ role_list = []
+ if state == 'present':
+ changed, role_list, role_result, instance_profile_result = create_role(
+ module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
+ elif state == 'absent':
+ changed, role_list, role_result, instance_profile_result = delete_role(
+ module, iam, name, orig_role_list, orig_prof_list)
+ elif state == 'update':
+ module.fail_json(
+ changed=False, msg='Role update not currently supported by boto.')
+ module.exit_json(changed=changed, roles=role_list, role_result=role_result,
+ instance_profile_result=instance_profile_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert.py
new file mode 100644
index 00000000..96c9bcca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_cert
+version_added: 1.0.0
+short_description: Manage server certificates for use on ELBs and CloudFront
+description:
+ - Allows for the management of server certificates.
+options:
+ name:
+ description:
+ - Name of certificate to add, update or remove.
+ required: true
+ type: str
+ new_name:
+ description:
+ - When I(state=present), this will update the name of the cert.
+ - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+ type: str
+ new_path:
+ description:
+ - When I(state=present), this will update the path of the cert.
+ - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+ type: str
+ state:
+ description:
+ - Whether to create (or update) or delete the certificate.
+      - If I(new_path) or I(new_name) is defined, specifying present will attempt to update these.
+ required: true
+ choices: [ "present", "absent" ]
+ type: str
+ path:
+ description:
+ - When creating or updating, specify the desired path of the certificate.
+ default: "/"
+ type: str
+ cert_chain:
+ description:
+ - The path to, or content of, the CA certificate chain in PEM encoded format.
+ As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ type: str
+ cert:
+ description:
+      - The path to, or content of, the certificate body in PEM encoded format.
+        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ type: str
+ key:
+ description:
+      - The path to, or content of, the private key in PEM encoded format.
+        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ type: str
+ dup_ok:
+ description:
+ - By default the module will not upload a certificate that is already uploaded into AWS.
+ - If I(dup_ok=True), it will upload the certificate as long as the name is unique.
+ - Defaults to C(false).
+ type: bool
+
+requirements: [ "boto" ]
+author: Jonathan I. Davila (@defionscode)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Basic server certificate upload from local file
+ community.aws.iam_cert:
+ name: very_ssl
+ state: present
+ cert: "{{ lookup('file', 'path/to/cert') }}"
+ key: "{{ lookup('file', 'path/to/key') }}"
+ cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
+
+- name: Basic server certificate upload
+ community.aws.iam_cert:
+ name: very_ssl
+ state: present
+ cert: path/to/cert
+ key: path/to/key
+ cert_chain: path/to/certchain
+
+- name: Server certificate upload using key string
+ community.aws.iam_cert:
+ name: very_ssl
+ state: present
+ path: "/a/cert/path/"
+ cert: body_of_somecert
+ key: vault_body_of_privcertkey
+ cert_chain: body_of_myverytrustedchain
+
+- name: Basic rename of existing certificate
+ community.aws.iam_cert:
+ name: very_ssl
+ new_name: new_very_ssl
+ state: present
+
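+# A minimal removal example (a sketch; assumes a certificate named very_ssl already exists):
+- name: Delete an existing server certificate
+  community.aws.iam_cert:
+    name: very_ssl
+    state: absent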
+'''
+import os
+
+try:
+ import boto
+ import boto.iam
+ import boto.ec2
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+def cert_meta(iam, name):
+ certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
+ ocert = certificate.certificate_body
+ opath = certificate.server_certificate_metadata.path
+ ocert_id = certificate.server_certificate_metadata.server_certificate_id
+ upload_date = certificate.server_certificate_metadata.upload_date
+ exp = certificate.server_certificate_metadata.expiration
+ arn = certificate.server_certificate_metadata.arn
+ return opath, ocert, ocert_id, upload_date, exp, arn
+
+
+def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
+ update = False
+
+ # IAM cert names are case insensitive
+ names_lower = [n.lower() for n in [name, new_name] if n is not None]
+ orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]
+
+ if any(ct in orig_cert_names_lower for ct in names_lower):
+ for i_name in names_lower:
+ if cert is not None:
+ try:
+ c_index = orig_cert_names_lower.index(i_name)
+                    # list.index() raises ValueError (not NameError) when the name is absent
+                    except ValueError:
+ continue
+ else:
+ # NOTE: remove the carriage return to strictly compare the cert bodies.
+ slug_cert = cert.replace('\r', '')
+ slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
+ if slug_orig_cert_bodies == slug_cert:
+ update = True
+ break
+ elif slug_cert.startswith(slug_orig_cert_bodies):
+ update = True
+ break
+ else:
+ module.fail_json(changed=False, msg='A cert with the name %s already exists and'
+ ' has a different certificate body associated'
+ ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
+ else:
+ update = True
+ break
+ elif cert in orig_cert_bodies and not dup_ok:
+ for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
+ if crt_body == cert:
+ module.fail_json(changed=False, msg='This certificate already'
+ ' exists under the name %s' % crt_name)
+
+ return update
+
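+# Illustrative note (hypothetical values): for an existing cert named "very_ssl",
+# dup_check returns True when the supplied body matches the stored body (ignoring
+# carriage returns), letting cert_action update or rename it; a name match with a
+# different body fails the module, and a body match under another name fails unless dup_ok.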
+
+def cert_action(module, iam, name, cpath, new_name, new_path, state,
+ cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
+ if state == 'present':
+ update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
+ orig_cert_bodies, dup_ok)
+ if update:
+ opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+ changed = True
+ if new_name and new_path:
+ iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
+ module.exit_json(changed=changed, original_name=name, new_name=new_name,
+ original_path=opath, new_path=new_path, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif new_name and not new_path:
+ iam.update_server_cert(name, new_cert_name=new_name)
+ module.exit_json(changed=changed, original_name=name, new_name=new_name,
+ cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif not new_name and new_path:
+ iam.update_server_cert(name, new_path=new_path)
+ module.exit_json(changed=changed, name=new_name,
+ original_path=opath, new_path=new_path, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ else:
+ changed = False
+ module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn,
+ msg='No new path or name specified. No changes made')
+ else:
+ changed = True
+ iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
+ opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+ module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif state == 'absent':
+ if name in orig_cert_names:
+ changed = True
+ iam.delete_server_cert(name)
+ module.exit_json(changed=changed, deleted_cert=name)
+ else:
+ changed = False
+ module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
+
+
+def load_data(cert, key, cert_chain):
+ # if paths are provided rather than lookups read the files and return the contents
+ if cert and os.path.isfile(cert):
+ with open(cert, 'r') as cert_fh:
+ cert = cert_fh.read().rstrip()
+ if key and os.path.isfile(key):
+ with open(key, 'r') as key_fh:
+ key = key_fh.read().rstrip()
+ if cert_chain and os.path.isfile(cert_chain):
+ with open(cert_chain, 'r') as cert_chain_fh:
+ cert_chain = cert_chain_fh.read()
+ return cert, key, cert_chain
+
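+# Example (hypothetical arguments): load_data('/tmp/cert.pem', '-----BEGIN PRIVATE KEY-----...', None)
+# reads /tmp/cert.pem from disk but passes the key material through unchanged,
+# since only arguments that name an existing file are treated as paths.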
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ cert=dict(),
+ key=dict(no_log=True),
+ cert_chain=dict(),
+ new_name=dict(),
+ path=dict(default='/'),
+ new_path=dict(),
+ dup_ok=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['new_path', 'key'],
+ ['new_path', 'cert'],
+ ['new_path', 'cert_chain'],
+ ['new_name', 'key'],
+ ['new_name', 'cert'],
+ ['new_name', 'cert_chain'],
+ ],
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg="Boto is required for this module")
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ try:
+ if region:
+ iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
+ else:
+ iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+ path = module.params.get('path')
+ new_name = module.params.get('new_name')
+ new_path = module.params.get('new_path')
+ dup_ok = module.params.get('dup_ok')
+ if state == 'present' and not new_name and not new_path:
+ cert, key, cert_chain = load_data(cert=module.params.get('cert'),
+ key=module.params.get('key'),
+ cert_chain=module.params.get('cert_chain'))
+ else:
+ cert = key = cert_chain = None
+
+ orig_cert_names = [ctb['server_certificate_name'] for ctb in
+ iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
+ orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
+ for thing in orig_cert_names]
+ if new_name == name:
+ new_name = None
+ if new_path == path:
+ new_path = None
+
+ changed = False
+ try:
+ cert_action(module, iam, name, path, new_name, new_path, state,
+ cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err), debug=[cert, key])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert_facts.py
new file mode 100644
index 00000000..6e371856
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_cert_facts.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_info
+version_added: 1.0.0
+short_description: Retrieve the information of a server certificate
+description:
+ - Retrieve the attributes of a server certificate.
+ - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change.
+author: "Allen Sanabria (@linuxdynasty)"
+requirements: [boto3, botocore]
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Retrieve server certificate
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+
+- name: Fail if the server certificate name was not found
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+ failed_when: "{{ server_cert.results | length == 0 }}"
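+
+# When name is omitted, every server certificate in the account is returned
+# (a sketch; the register name is arbitrary):
+- name: Retrieve all server certificates
+  community.aws.iam_server_certificate_info:
+  register: server_certs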
+'''
+
+RETURN = '''
+server_certificate_id:
+  description: The 21-character certificate ID.
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+  description: The PEM-encoded certificate body.
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import boto3
+ import botocore
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_server_certs(iam, name=None):
+ """Retrieve the attributes of a server certificate if it exists or all certs.
+ Args:
+ iam (botocore.client.IAM): The boto3 iam instance.
+
+ Kwargs:
+ name (str): The name of the server certificate.
+
+ Basic Usage:
+ >>> import boto3
+ >>> iam = boto3.client('iam')
+ >>> name = "server-cert-name"
+ >>> results = get_server_certs(iam, name)
+ {
+ "upload_date": "2015-04-25T00:36:40+00:00",
+ "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+ "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+ "server_certificate_name": "server-cert-name",
+ "expiration": "2017-06-15T12:00:00+00:00",
+ "path": "/",
+ "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+ }
+ """
+ results = dict()
+ try:
+ if name:
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ else:
+ server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+ for server_cert in server_certs:
+ if not name:
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+ cert_md = server_cert['ServerCertificateMetadata']
+ results[cert_md['ServerCertificateName']] = {
+ 'certificate_body': server_cert['CertificateBody'],
+ 'server_certificate_id': cert_md['ServerCertificateId'],
+ 'server_certificate_name': cert_md['ServerCertificateName'],
+ 'arn': cert_md['Arn'],
+ 'path': cert_md['Path'],
+ 'expiration': cert_md['Expiration'].isoformat(),
+ 'upload_date': cert_md['UploadDate'].isoformat(),
+ }
+
+ except botocore.exceptions.ClientError:
+ pass
+
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,)
+ if module._name == 'iam_server_certificate_facts':
+ module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ iam = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ cert_name = module.params.get('name')
+ results = get_server_certs(iam, cert_name)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_group.py
new file mode 100644
index 00000000..b55e3221
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_group.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_group
+version_added: 1.0.0
+short_description: Manage AWS IAM groups
+description:
+ - Manage AWS IAM groups.
+author:
+- Nick Aslanidis (@naslanidis)
+- Maksym Postument (@infectsoldier)
+options:
+ name:
+ description:
+ - The name of the group to create.
+ required: true
+ type: str
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or friendly names to attach to the role.
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ required: false
+ type: list
+ elements: str
+ aliases: ['managed_policy']
+ users:
+ description:
+ - A list of existing users to add as members of the group.
+ required: false
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or remove the IAM group.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ purge_policies:
+ description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ required: false
+ default: false
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ purge_users:
+ description:
+      - When I(purge_users=true), users not listed in I(users) will be removed from the group.
+ required: false
+ default: false
+ type: bool
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a group
+ community.aws.iam_group:
+ name: testgroup1
+ state: present
+
+- name: Create a group and attach a managed policy using its ARN
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ state: present
+
+- name: Create a group with users as members and attach a managed policy using its ARN
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ users:
+ - test_user1
+ - test_user2
+ state: present
+
+- name: Remove all managed policies from an existing group with an empty list
+ community.aws.iam_group:
+ name: testgroup1
+ state: present
+ purge_policies: true
+
+- name: Remove all group members from an existing group
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ purge_users: true
+ state: present
+
+- name: Delete the group
+ community.aws.iam_group:
+ name: testgroup1
+ state: absent
+
+'''
+RETURN = r'''
+iam_group:
+ description: dictionary containing all the group information including group membership
+ returned: success
+ type: complex
+ contains:
+ group:
+ description: dictionary containing all the group information
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the group
+ type: str
+ sample: "arn:aws:iam::1234567890:group/testgroup1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the group was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ group_id:
+ description: the stable and unique string identifying the group
+ type: str
+ sample: AGPAIDBWE12NSFINE55TM
+ group_name:
+ description: the friendly name that identifies the group
+ type: str
+ sample: testgroup1
+ path:
+ description: the path to the group
+ type: str
+ sample: /
+ users:
+ description: list containing all the group members
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the user
+ type: str
+ sample: "arn:aws:iam::1234567890:user/test_user1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the user was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ user_id:
+ description: the stable and unique string identifying the user
+ type: str
+ sample: AIDAIZTPY123YQRS22YU2
+ user_name:
+ description: the friendly name that identifies the user
+ type: str
+ sample: testgroup1
+ path:
+ description: the path to the user
+ type: str
+ sample: /
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def compare_attached_group_policies(current_attached_policies, new_attached_policies):
+
+ # If new_attached_policies is None it means we want to remove all policies
+ if len(current_attached_policies) > 0 and new_attached_policies is None:
+ return False
+
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+ if set(current_attached_policies_arn_list) == set(new_attached_policies):
+ return True
+ else:
+ return False
+
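+# Example (hypothetical values): comparing [{'PolicyArn': 'arn:aws:iam::aws:policy/A'}]
+# against ['arn:aws:iam::aws:policy/A'] returns True (no reconciliation needed);
+# comparing it against [] returns False, so the caller adjusts the attachments.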
+
+def compare_group_members(current_group_members, new_group_members):
+
+    # If new_group_members is None it means we want to remove all users from the group
+ if len(current_group_members) > 0 and new_group_members is None:
+ return False
+ if set(current_group_members) == set(new_group_members):
+ return True
+ else:
+ return False
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
+    if all(policy.startswith('arn:') for policy in policy_names if policy is not None):
+ return policy_names
+ allpolicies = {}
+ paginator = connection.get_paginator('list_policies')
+ policies = paginator.paginate().build_full_result()['Policies']
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json(msg="Couldn't find policy: " + str(e))
+
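+# Example (hypothetical account and policy): given a customer policy named "ReadOnly"
+# with ARN arn:aws:iam::123456789012:policy/ReadOnly, passing ['ReadOnly'] returns
+# ['arn:aws:iam::123456789012:policy/ReadOnly']; entries already in ARN form are
+# returned unchanged because both names and ARNs are keys in the lookup map.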
+
+def create_or_update_group(connection, module):
+
+ params = dict()
+ params['GroupName'] = module.params.get('name')
+ managed_policies = module.params.get('managed_policies')
+ users = module.params.get('users')
+ purge_users = module.params.get('purge_users')
+ purge_policies = module.params.get('purge_policies')
+ changed = False
+ if managed_policies:
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ # Get group
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get group")
+
+ # If group is None, create it
+ if group is None:
+ # Check mode means we would create the group
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ group = connection.create_group(**params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create group")
+
+ # Manage managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['GroupName'])
+ if not compare_attached_group_policies(current_attached_policies, managed_policies):
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+        # When purging, detach any currently attached policy that is not listed in managed_policies
+ if purge_policies:
+ # Detach policies not present
+ for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName'])
+ # If there are policies to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(managed_policies) - set(current_attached_policies_arn_list):
+ changed = True
+ # If there are policies in managed_policies attach each policy
+ if managed_policies != [None] and not module.check_mode:
+ for policy_arn in managed_policies:
+ try:
+ connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName'])
+
+ # Manage group memberships
+ try:
+ current_group_members = get_group(connection, module, params['GroupName'])['Users']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+
+ current_group_members_list = []
+ for member in current_group_members:
+ current_group_members_list.append(member['UserName'])
+
+ if not compare_group_members(current_group_members_list, users):
+
+ if purge_users:
+ for user in list(set(current_group_members_list) - set(users)):
+ # Ensure we mark things have changed if any user gets purged
+ changed = True
+ # Skip actions for check mode
+ if not module.check_mode:
+ try:
+ connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName']))
+ # If there are users to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(users) - set(current_group_members_list):
+ changed = True
+ # Skip actions for check mode
+ if users != [None] and not module.check_mode:
+ for user in users:
+ try:
+ connection.add_user_to_group(GroupName=params['GroupName'], UserName=user)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName']))
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ # Get the group again
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+
+ module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group))
+
+
+def destroy_group(connection, module):
+
+ params = dict()
+ params['GroupName'] = module.params.get('name')
+
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+ if group:
+ # Check mode means we would remove this group
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Remove any attached policies otherwise deletion fails
+ try:
+ for policy in get_attached_policy_list(connection, module, params['GroupName']):
+ connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName'])
+
+ # Remove any users in the group otherwise deletion fails
+ current_group_members_list = []
+ try:
+ current_group_members = get_group(connection, module, params['GroupName'])['Users']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+ for member in current_group_members:
+ current_group_members_list.append(member['UserName'])
+ for user in current_group_members_list:
+ try:
+ connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName']))
+
+ try:
+ connection.delete_group(**params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName'])
+
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=True)
+
+
+@AWSRetry.exponential_backoff()
+def get_group(connection, module, name):
+ try:
+ paginator = connection.get_paginator('get_group')
+ return paginator.paginate(GroupName=name).build_full_result()
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ raise
+
+
+@AWSRetry.exponential_backoff()
+def get_attached_policy_list(connection, module, name):
+
+ try:
+ paginator = connection.get_paginator('list_attached_group_policies')
+ return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ raise
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
+ users=dict(default=[], type='list', elements='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ purge_users=dict(default=False, type='bool'),
+ purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('iam')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_group(connection, module)
+ else:
+ destroy_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
new file mode 100644
index 00000000..a0b7c3c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_managed_policy
+version_added: 1.0.0
+short_description: Manage User Managed IAM policies
+description:
+ - Allows creating and removing managed IAM policies
+options:
+ policy_name:
+ description:
+ - The name of the managed policy.
+ required: True
+ type: str
+ policy_description:
+ description:
+      - A helpful description of this policy; this value is immutable and can only be set when creating a new policy.
+ default: ''
+ type: str
+ policy:
+ description:
+      - A properly JSON-formatted policy document.
+ type: json
+ make_default:
+ description:
+ - Make this revision the default revision.
+ default: True
+ type: bool
+ only_version:
+ description:
+      - Remove all other non-default revisions. If this is used with C(make_default) it will result in all other versions of this policy being deleted.
+ type: bool
+ default: false
+ state:
+ description:
+      - Whether this managed policy should be present or absent. Set to absent to detach all entities from this policy and remove it if found.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ fail_on_delete:
+ description:
+      - The I(fail_on_delete) option does nothing and will be removed after 2022-06-01.
+ type: bool
+
+author: "Dan Kozlowski (@dkhenry)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+'''
+
+EXAMPLES = '''
+# Create Policy ex nihilo
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy_description: "A Helpful managed policy"
+ policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
+ state: present
+
+# Update a policy with a new default version
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy: "{{ lookup('file', 'managed_policy_update.json') }}"
+ state: present
+
+# Update a policy with a new non default version
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: "Allow"
+ Action: "logs:CreateLogGroup"
+ Resource: "*"
+ make_default: false
+ state: present
+
+# Update a policy and make it the only version and the default version
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy: |
+ {
+ "Version": "2012-10-17",
+ "Statement":[{
+ "Effect": "Allow",
+ "Action": "logs:PutRetentionPolicy",
+ "Resource": "*"
+ }]
+ }
+ only_version: true
+ state: present
+
+# Remove a policy
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ state: absent
+'''
+
+RETURN = '''
+policy:
+ description: Returns the policy json structure, when state == absent this will return the value of the removed policy.
+ returned: success
+ type: str
+ sample: '{
+ "arn": "arn:aws:iam::aws:policy/AdministratorAccess "
+ "attachment_count": 0,
+ "create_date": "2017-03-01T15:42:55.981000+00:00",
+ "default_version_id": "v1",
+ "is_attachable": true,
+ "path": "/",
+ "policy_id": "ANPALM4KLDMTFXGOOJIHL",
+ "policy_name": "AdministratorAccess",
+ "update_date": "2017-03-01T15:42:55.981000+00:00"
+ }'
+'''
+
+import json
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_policies_with_backoff(iam):
+ paginator = iam.get_paginator('list_policies')
+ return paginator.paginate(Scope='Local').build_full_result()
+
+
+def get_policy_by_name(module, iam, name):
+ try:
+ response = list_policies_with_backoff(iam)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't list policies: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for policy in response['Policies']:
+ if policy['PolicyName'] == name:
+ return policy
+ return None
+
+
+def delete_oldest_non_default_version(module, iam, policy):
+ try:
+ versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
+ if not v['IsDefaultVersion']]
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ versions.sort(key=lambda v: v['CreateDate'], reverse=True)
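+    # Sorted newest first, so the slice [-1:] selects the single oldest non-default version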
+ for v in versions[-1:]:
+ try:
+ iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+
+# This needs to return policy_version, changed
+def get_or_create_policy_version(module, iam, policy, policy_document):
+ try:
+ versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for v in versions:
+ try:
+ document = iam.get_policy_version(PolicyArn=policy['Arn'],
+ VersionId=v['VersionId'])['PolicyVersion']['Document']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ # If the current policy matches the existing one
+ if not compare_policies(document, json.loads(to_native(policy_document))):
+ return v, False
+
+ # No existing version so create one
+ # There is a service limit (typically 5) of policy versions.
+ #
+ # Rather than assume that it is 5, we'll try to create the policy
+ # and if that doesn't work, delete the oldest non default policy version
+ # and try again.
+ try:
+ version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
+ return version, True
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'LimitExceeded':
+ delete_oldest_non_default_version(module, iam, policy)
+ try:
+ version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
+ return version, True
+ except botocore.exceptions.ClientError as second_e:
+ e = second_e
+ # Handle both when the exception isn't LimitExceeded or
+ # the second attempt still failed
+ module.fail_json(msg="Couldn't create policy version: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+
+def set_if_default(module, iam, policy, policy_version, is_default):
+ if is_default and not policy_version['IsDefaultVersion']:
+ try:
+ iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't set default policy version: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ return True
+ return False
+
+
+def set_if_only(module, iam, policy, policy_version, is_only):
+ if is_only:
+ try:
+ versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[
+ 'Versions'] if not v['IsDefaultVersion']]
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for v in versions:
+ try:
+ iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ return len(versions) > 0
+ return False
+
+
+def detach_all_entities(module, iam, policy, **kwargs):
+ try:
+ entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ for g in entities['PolicyGroups']:
+ try:
+ iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for u in entities['PolicyUsers']:
+ try:
+ iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for r in entities['PolicyRoles']:
+ try:
+ iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+    if entities['IsTruncated']:
+        # Detach the next page of results; boto3's pagination parameter is 'Marker'
+        detach_all_entities(module, iam, policy, Marker=entities['Marker'])
+
+
+def main():
+ argument_spec = dict(
+ policy_name=dict(required=True),
+ policy_description=dict(default=''),
+ policy=dict(type='json'),
+ make_default=dict(type='bool', default=True),
+ only_version=dict(type='bool', default=False),
+ fail_on_delete=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['policy']]],
+ )
+
+ name = module.params.get('policy_name')
+ description = module.params.get('policy_description')
+ state = module.params.get('state')
+ default = module.params.get('make_default')
+ only = module.params.get('only_version')
+
+ policy = None
+
+ if module.params.get('policy') is not None:
+ policy = json.dumps(json.loads(module.params.get('policy')))
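+        # Round-tripping through json.loads/json.dumps validates the user-supplied
+        # document and normalises its whitespace before it is sent to the API.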
+
+ try:
+ iam = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ p = get_policy_by_name(module, iam, name)
+ if state == 'present':
+ if p is None:
+ # No Policy so just create one
+ try:
+ rvalue = iam.create_policy(PolicyName=name, Path='/',
+ PolicyDocument=policy, Description=description)
+ except Exception as e:
+ module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
+ else:
+ policy_version, changed = get_or_create_policy_version(module, iam, p, policy)
+ changed = set_if_default(module, iam, p, policy_version, default) or changed
+ changed = set_if_only(module, iam, p, policy_version, only) or changed
+            # If anything has changed we need to refresh the policy
+ if changed:
+ try:
+ p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
+ except Exception as e:
+ module.fail_json(msg="Couldn't get policy: %s" % to_native(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p))
+ else:
+ # Check for existing policy
+ if p:
+ # Detach policy
+ detach_all_entities(module, iam, p)
+ # Delete Versions
+ try:
+ versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ for v in versions:
+ if not v['IsDefaultVersion']:
+ try:
+ iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't delete policy version %s: %s" %
+ (v['VersionId'], to_native(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ # Delete policy
+ try:
+ iam.delete_policy(PolicyArn=p['Arn'])
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ # This is the one case where we will return the old policy
+ module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
+ else:
+ module.exit_json(changed=False, policy=None)
+# end main
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_facts.py
new file mode 100644
index 00000000..c79afab0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_facts.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_mfa_device_info
+version_added: 1.0.0
+short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
+description:
+ - List the MFA (Multi-Factor Authentication) devices registered for a user
+ - This module was called C(iam_mfa_device_facts) before Ansible 2.9. The usage did not change.
+author: Victor Costan (@pwnall)
+options:
+ user_name:
+ description:
+ - The name of the user whose MFA devices will be listed
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+'''
+
+RETURN = """
+mfa_devices:
+ description: The MFA devices registered for the given user
+ returned: always
+ type: list
+ sample:
+ - enable_date: "2016-03-11T23:25:36+00:00"
+ serial_number: arn:aws:iam::085120003701:mfa/pwnall
+ user_name: pwnall
+ - enable_date: "2016-03-11T23:25:37+00:00"
+ serial_number: arn:aws:iam::085120003702:mfa/pwnall
+ user_name: pwnall
+"""
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
+- name: List MFA devices
+ community.aws.iam_mfa_device_info:
+ register: mfa_devices
+
+# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+- name: Assume an existing role
+ community.aws.sts_assume_role:
+ mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_mfa_devices(connection, module):
+ user_name = module.params.get('user_name')
+ changed = False
+
+ args = {}
+ if user_name is not None:
+ args['UserName'] = user_name
+ try:
+ response = connection.list_mfa_devices(**args)
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list MFA devices")
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+def main():
+ argument_spec = dict(
+ user_name=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'iam_mfa_device_facts':
+ module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_mfa_devices(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
new file mode 100644
index 00000000..c79afab0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_mfa_device_info
+version_added: 1.0.0
+short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
+description:
+ - List the MFA (Multi-Factor Authentication) devices registered for a user
+ - This module was called C(iam_mfa_device_facts) before Ansible 2.9. The usage did not change.
+author: Victor Costan (@pwnall)
+options:
+ user_name:
+ description:
+ - The name of the user whose MFA devices will be listed
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+'''
+
+RETURN = """
+mfa_devices:
+ description: The MFA devices registered for the given user
+ returned: always
+ type: list
+ sample:
+ - enable_date: "2016-03-11T23:25:36+00:00"
+ serial_number: arn:aws:iam::085120003701:mfa/pwnall
+ user_name: pwnall
+ - enable_date: "2016-03-11T23:25:37+00:00"
+ serial_number: arn:aws:iam::085120003702:mfa/pwnall
+ user_name: pwnall
+"""
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
+- name: List MFA devices
+ community.aws.iam_mfa_device_info:
+ register: mfa_devices
+
+# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+- name: Assume an existing role
+ community.aws.sts_assume_role:
+ mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def list_mfa_devices(connection, module):
+ user_name = module.params.get('user_name')
+ changed = False
+
+ args = {}
+ if user_name is not None:
+ args['UserName'] = user_name
+ try:
+ response = connection.list_mfa_devices(**args)
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list MFA devices")
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+def main():
+ argument_spec = dict(
+ user_name=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'iam_mfa_device_facts':
+ module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws')
+
+ try:
+ connection = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_mfa_devices(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_password_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
new file mode 100644
index 00000000..d654a846
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_password_policy
+version_added: 1.0.0
+short_description: Update an IAM Password Policy
+description:
+ - Module updates an IAM Password Policy on a given AWS account
+requirements: [ 'botocore', 'boto3' ]
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ state:
+ description:
+ - Specifies the overall state of the password policy.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ min_pw_length:
+ description:
+ - Minimum password length.
+ default: 6
+ aliases: [minimum_password_length]
+ type: int
+ require_symbols:
+ description:
+ - Require symbols in password.
+ default: false
+ type: bool
+ require_numbers:
+ description:
+ - Require numbers in password.
+ default: false
+ type: bool
+ require_uppercase:
+ description:
+ - Require uppercase letters in password.
+ default: false
+ type: bool
+ require_lowercase:
+ description:
+ - Require lowercase letters in password.
+ default: false
+ type: bool
+ allow_pw_change:
+ description:
+ - Allow users to change their password.
+ default: false
+ type: bool
+ aliases: [allow_password_change]
+ pw_max_age:
+ description:
+ - Maximum age for a password in days. When this option is 0 then passwords
+ do not expire automatically.
+ default: 0
+ aliases: [password_max_age]
+ type: int
+ pw_reuse_prevent:
+ description:
+ - Prevent re-use of passwords.
+ default: 0
+ aliases: [password_reuse_prevent, prevent_reuse]
+ type: int
+ pw_expire:
+ description:
+ - Prevents users from changing an expired password.
+ default: false
+ type: bool
+ aliases: [password_expire, expire]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Password policy for AWS account
+ community.aws.iam_password_policy:
+ state: present
+ min_pw_length: 8
+ require_symbols: false
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ pw_max_age: 60
+ pw_reuse_prevent: 5
+ pw_expire: false
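+
+# A hedged companion example: with state=absent the account policy is
+# deleted and no other options are required.
+- name: Remove the account password policy
+ community.aws.iam_password_policy:
+ state: absent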
+'''
+
+RETURN = ''' # '''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class IAMConnection(object):
+ def __init__(self, module):
+ try:
+ self.connection = module.resource('iam')
+ self.module = module
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
+
+ def policy_to_dict(self, policy):
+ policy_attributes = [
+ 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry',
+ 'max_password_age', 'minimum_password_length', 'password_reuse_prevention',
+ 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters'
+ ]
+ ret = {}
+ for attr in policy_attributes:
+ ret[attr] = getattr(policy, attr)
+ return ret
+
+ def update_password_policy(self, module, policy):
+ min_pw_length = module.params.get('min_pw_length')
+ require_symbols = module.params.get('require_symbols')
+ require_numbers = module.params.get('require_numbers')
+ require_uppercase = module.params.get('require_uppercase')
+ require_lowercase = module.params.get('require_lowercase')
+ allow_pw_change = module.params.get('allow_pw_change')
+ pw_max_age = module.params.get('pw_max_age')
+ pw_reuse_prevent = module.params.get('pw_reuse_prevent')
+ pw_expire = module.params.get('pw_expire')
+
+ update_parameters = dict(
+ MinimumPasswordLength=min_pw_length,
+ RequireSymbols=require_symbols,
+ RequireNumbers=require_numbers,
+ RequireUppercaseCharacters=require_uppercase,
+ RequireLowercaseCharacters=require_lowercase,
+ AllowUsersToChangePassword=allow_pw_change,
+ HardExpiry=pw_expire
+ )
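+ # The IAM API only accepts positive values for MaxPasswordAge and
+ # PasswordReusePrevention, so the documented "disabled" default of 0 is
+ # simply omitted from the request below.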
+ if pw_reuse_prevent:
+ update_parameters.update(PasswordReusePrevention=pw_reuse_prevent)
+ if pw_max_age:
+ update_parameters.update(MaxPasswordAge=pw_max_age)
+
+ try:
+ original_policy = self.policy_to_dict(policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ original_policy = {}
+
+ try:
+ results = policy.update(**update_parameters)
+ policy.reload()
+ updated_policy = self.policy_to_dict(policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy")
+
+ changed = (original_policy != updated_policy)
+ return (changed, updated_policy, camel_dict_to_snake_dict(results))
+
+ def delete_password_policy(self, policy):
+ try:
+ results = policy.delete()
+ except botocore.exceptions.ClientError as e:
+ # Only ClientError carries a structured error response to inspect.
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"})
+ else:
+ self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
+ except botocore.exceptions.BotoCoreError as e:
+ self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
+ return camel_dict_to_snake_dict(results)
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'state': dict(choices=['present', 'absent'], required=True),
+ 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6),
+ 'require_symbols': dict(type='bool', default=False),
+ 'require_numbers': dict(type='bool', default=False),
+ 'require_uppercase': dict(type='bool', default=False),
+ 'require_lowercase': dict(type='bool', default=False),
+ 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False),
+ 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0),
+ 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0),
+ 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False),
+ },
+ supports_check_mode=True,
+ )
+
+ resource = IAMConnection(module)
+ policy = resource.connection.AccountPasswordPolicy()
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ (changed, new_policy, update_result) = resource.update_password_policy(module, policy)
+ module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy)
+
+ if state == 'absent':
+ delete_result = resource.delete_password_policy(policy)
+ module.exit_json(changed=True, task_status={'IAM': delete_result})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy.py
new file mode 100644
index 00000000..819ed369
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy
+version_added: 1.0.0
+short_description: Manage inline IAM policies for users, groups, and roles
+description:
+ - Allows uploading or removing inline IAM policies for IAM users, groups or roles.
+ - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role),
+ M(community.aws.iam_group) and M(community.aws.iam_managed_policy)
+options:
+ iam_type:
+ description:
+ - Type of IAM resource.
+ required: true
+ choices: [ "user", "group", "role"]
+ type: str
+ iam_name:
+ description:
+ - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - The name label for the policy to create or remove.
+ required: true
+ type: str
+ policy_document:
+ description:
+ - The path to a properly JSON-formatted policy file.
+ - Mutually exclusive with I(policy_json).
+ - This option has been deprecated and will be removed in 2.14. The existing behavior can be
+ reproduced by using the I(policy_json) option and reading the file using the lookup plugin.
+ type: str
+ policy_json:
+ description:
+ - A properly JSON-formatted policy as a string.
+ - Mutually exclusive with I(policy_document).
+ - See U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) on how to use it properly.
+ type: json
+ state:
+ description:
+ - Whether to create or delete the IAM policy.
+ choices: [ "present", "absent"]
+ default: present
+ type: str
+ skip_duplicates:
+ description:
+ - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. If there is a match it will not make
+ a new policy object with the same rules.
+ - The current default is C(true). However, this behavior can be confusing and as such the default will change to C(false) in 2.14. To maintain
+ the existing behavior explicitly set I(skip_duplicates=true).
+ type: bool
+
+author:
+ - "Jonathan I. Davila (@defionscode)"
+ - "Dennis Podkovyrin (@sbj-ss)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Create a policy with the name of 'Admin' to the group 'administrators'
+- name: Assign a policy called Admin to the administrators group
+ community.aws.iam_policy:
+ iam_type: group
+ iam_name: administrators
+ policy_name: Admin
+ state: present
+ policy_document: admin_policy.json
+
+# Advanced example, create two new groups and add a READ-ONLY policy to both
+# groups.
+- name: Create Two Groups, Mario and Luigi
+ community.aws.iam:
+ iam_type: group
+ name: "{{ item }}"
+ state: present
+ loop:
+ - Mario
+ - Luigi
+ register: new_groups
+
+- name: Apply READ-ONLY policy to new groups that have been recently created
+ community.aws.iam_policy:
+ iam_type: group
+ iam_name: "{{ item.created_group.group_name }}"
+ policy_name: "READ-ONLY"
+ policy_document: readonlypolicy.json
+ state: present
+ loop: "{{ new_groups.results }}"
+
+# Create a new S3 policy with prefix per user
+- name: Create S3 policy from template
+ community.aws.iam_policy:
+ iam_type: user
+ iam_name: "{{ item.user }}"
+ policy_name: "s3_limited_access_{{ item.prefix }}"
+ state: present
+ policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
+ loop:
+ - user: s3_user
+ prefix: s3_user_prefix
+
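+# A hedged sketch: passing skip_duplicates explicitly avoids the deprecation
+# warning noted above; names here are illustrative.
+- name: Create the policy even when an identical document already exists
+ community.aws.iam_policy:
+ iam_type: user
+ iam_name: s3_user
+ policy_name: s3_limited_access
+ policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
+ skip_duplicates: false
+ state: present
+
+- name: Remove the inline policy again
+ community.aws.iam_policy:
+ iam_type: user
+ iam_name: s3_user
+ policy_name: s3_limited_access
+ state: absent
+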
+'''
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry
+from ansible.module_utils.six import string_types
+
+
+class PolicyError(Exception):
+ pass
+
+
+class Policy:
+
+ def __init__(self, client, name, policy_name, policy_document, policy_json, skip_duplicates, state, check_mode):
+ self.client = client
+ self.name = name
+ self.policy_name = policy_name
+ self.policy_document = policy_document
+ self.policy_json = policy_json
+ self.skip_duplicates = skip_duplicates
+ self.state = state
+ self.check_mode = check_mode
+ self.changed = False
+
+ @staticmethod
+ def _iam_type():
+ return ''
+
+ def _list(self, name):
+ return {}
+
+ def list(self):
+ return self._list(self.name).get('PolicyNames', [])
+
+ def _get(self, name, policy_name):
+ return '{}'
+
+ def get(self, policy_name):
+ return self._get(self.name, policy_name)['PolicyDocument']
+
+ def _put(self, name, policy_name, policy_doc):
+ pass
+
+ def put(self, policy_doc):
+ if not self.check_mode:
+ self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True))
+ self.changed = True
+
+ def _delete(self, name, policy_name):
+ pass
+
+ def delete(self):
+ if self.policy_name not in self.list():
+ self.changed = False
+ return
+
+ self.changed = True
+ if not self.check_mode:
+ self._delete(self.name, self.policy_name)
+
+ def get_policy_text(self):
+ try:
+ if self.policy_document is not None:
+ return self.get_policy_from_document()
+ if self.policy_json is not None:
+ return self.get_policy_from_json()
+ except json.JSONDecodeError as e:
+ raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e))
+ return None
+
+ def get_policy_from_document(self):
+ try:
+ with open(self.policy_document, 'r') as json_data:
+ pdoc = json.load(json_data)
+ except IOError as e:
+ if e.errno == 2:
+ raise PolicyError('policy_document {0!r} does not exist'.format(self.policy_document))
+ raise
+ return pdoc
+
+ def get_policy_from_json(self):
+ if isinstance(self.policy_json, string_types):
+ pdoc = json.loads(self.policy_json)
+ else:
+ pdoc = self.policy_json
+ return pdoc
+
+ def create(self):
+ matching_policies = []
+ policy_doc = self.get_policy_text()
+ policy_match = False
+ for pol in self.list():
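+ # compare_policies() returns True when the two documents differ, so a
+ # falsy result marks an existing duplicate of the requested policy.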
+ if not compare_policies(self.get(pol), policy_doc):
+ matching_policies.append(pol)
+ policy_match = True
+
+ if (self.policy_name not in matching_policies) and not (self.skip_duplicates and policy_match):
+ self.put(policy_doc)
+
+ def run(self):
+ if self.state == 'present':
+ self.create()
+ elif self.state == 'absent':
+ self.delete()
+ return {
+ 'changed': self.changed,
+ self._iam_type() + '_name': self.name,
+ 'policies': self.list()
+ }
+
+
+class UserPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'user'
+
+ def _list(self, name):
+ return self.client.list_user_policies(aws_retry=True, UserName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+
+class RolePolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'role'
+
+ def _list(self, name):
+ return self.client.list_role_policies(aws_retry=True, RoleName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+
+class GroupPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'group'
+
+ def _list(self, name):
+ return self.client.list_group_policies(aws_retry=True, GroupName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+
+def main():
+ argument_spec = dict(
+ iam_type=dict(required=True, choices=['user', 'group', 'role']),
+ state=dict(default='present', choices=['present', 'absent']),
+ iam_name=dict(required=True),
+ policy_name=dict(required=True),
+ policy_document=dict(default=None, required=False),
+ policy_json=dict(type='json', default=None, required=False),
+ skip_duplicates=dict(type='bool', default=None, required=False)
+ )
+ mutually_exclusive = [['policy_document', 'policy_json']]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True)
+
+ skip_duplicates = module.params.get('skip_duplicates')
+
+ if skip_duplicates is None:
+ module.deprecate('The skip_duplicates behaviour has caused confusion and'
+ ' will be disabled by default in Ansible 2.14',
+ date='2022-06-01', collection_name='community.aws')
+ skip_duplicates = True
+
+ if module.params.get('policy_document'):
+ module.deprecate('The policy_document option has been deprecated and'
+ ' will be removed in Ansible 2.14',
+ date='2022-06-01', collection_name='community.aws')
+
+ args = dict(
+ client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+ name=module.params.get('iam_name'),
+ policy_name=module.params.get('policy_name'),
+ policy_document=module.params.get('policy_document'),
+ policy_json=module.params.get('policy_json'),
+ skip_duplicates=skip_duplicates,
+ state=module.params.get('state'),
+ check_mode=module.check_mode,
+ )
+ iam_type = module.params.get('iam_type')
+
+ try:
+ if iam_type == 'user':
+ policy = UserPolicy(**args)
+ elif iam_type == 'role':
+ policy = RolePolicy(**args)
+ elif iam_type == 'group':
+ policy = GroupPolicy(**args)
+
+ module.exit_json(**(policy.run()))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+ except PolicyError as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy_info.py
new file mode 100644
index 00000000..c919caec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_policy_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy_info
+version_added: 1.0.0
+short_description: Retrieve inline IAM policies for users, groups, and roles
+description:
+ - Supports fetching of inline IAM policies for IAM users, groups and roles.
+options:
+ iam_type:
+ description:
+ - Type of IAM resource you wish to retrieve inline policies for.
+ required: yes
+ choices: [ "user", "group", "role"]
+ type: str
+ iam_name:
+ description:
+ - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name.
+ required: yes
+ type: str
+ policy_name:
+ description:
+ - Name of a specific IAM inline policy you wish to retrieve.
+ required: no
+ type: str
+
+author:
+ - Mark Chappell (@tremble)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Describe all inline IAM policies on an IAM User
+ community.aws.iam_policy_info:
+ iam_type: user
+ iam_name: example_user
+
+- name: Describe a specific inline policy on an IAM Role
+ community.aws.iam_policy_info:
+ iam_type: role
+ iam_name: example_role
+ policy_name: example_policy
+
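+# A hedged usage sketch: register the result and read the documented
+# return values (all_policy_names is always returned).
+- name: Fetch inline policies and show their names
+ community.aws.iam_policy_info:
+ iam_type: user
+ iam_name: example_user
+ register: user_policies
+
+- name: Show every inline policy name on the user
+ ansible.builtin.debug:
+ var: user_policies.all_policy_names
+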
+'''
+RETURN = '''
+policies:
+ description: A list containing the matching IAM inline policy names and their data
+ returned: success
+ type: complex
+ contains:
+ policy_name:
+ description: The Name of the inline policy
+ returned: success
+ type: str
+ policy_document:
+ description: The JSON document representing the inline IAM policy
+ returned: success
+ type: list
+policy_names:
+ description: A list of matching names of the IAM inline policies on the queried object
+ returned: success
+ type: list
+all_policy_names:
+ description: A list of names of all of the IAM inline policies on the queried object
+ returned: success
+ type: list
+'''
+
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.six import string_types
+
+
+class PolicyError(Exception):
+ pass
+
+
+class Policy:
+
+ def __init__(self, client, name, policy_name):
+ self.client = client
+ self.name = name
+ self.policy_name = policy_name
+ self.changed = False
+
+ @staticmethod
+ def _iam_type():
+ return ''
+
+ def _list(self, name):
+ return {}
+
+ def list(self):
+ return self._list(self.name).get('PolicyNames', [])
+
+ def _get(self, name, policy_name):
+ return '{}'
+
+ def get(self, policy_name):
+ return self._get(self.name, policy_name)['PolicyDocument']
+
+ def get_all(self):
+ policies = list()
+ for policy in self.list():
+ policies.append({"policy_name": policy, "policy_document": self.get(policy)})
+ return policies
+
+ def run(self):
+ policy_list = self.list()
+ ret_val = {
+ 'changed': False,
+ self._iam_type() + '_name': self.name,
+ 'all_policy_names': policy_list
+ }
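+ # If policy_name was requested but is absent on the object, neither
+ # 'policies' nor 'policy_names' is added to the result.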
+ if self.policy_name is None:
+ ret_val.update(policies=self.get_all())
+ ret_val.update(policy_names=policy_list)
+ elif self.policy_name in policy_list:
+ ret_val.update(policies=[{
+ "policy_name": self.policy_name,
+ "policy_document": self.get(self.policy_name)}])
+ ret_val.update(policy_names=[self.policy_name])
+ return ret_val
+
+
+class UserPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'user'
+
+ def _list(self, name):
+ return self.client.list_user_policies(aws_retry=True, UserName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+
+class RolePolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'role'
+
+ def _list(self, name):
+ return self.client.list_role_policies(aws_retry=True, RoleName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+
+class GroupPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'group'
+
+ def _list(self, name):
+ return self.client.list_group_policies(aws_retry=True, GroupName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+
+def main():
+ argument_spec = dict(
+ iam_type=dict(required=True, choices=['user', 'group', 'role']),
+ iam_name=dict(required=True),
+ policy_name=dict(default=None, required=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ args = dict(
+ client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+ name=module.params.get('iam_name'),
+ policy_name=module.params.get('policy_name'),
+ )
+ iam_type = module.params.get('iam_type')
+
+ try:
+ if iam_type == 'user':
+ policy = UserPolicy(**args)
+ elif iam_type == 'role':
+ policy = RolePolicy(**args)
+ elif iam_type == 'group':
+ policy = GroupPolicy(**args)
+
+ module.exit_json(**(policy.run()))
+ except ClientError as e:
+ # BotoCoreError has no 'response' attribute, so inspect ClientError separately.
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ module.exit_json(changed=False, msg=e.response['Error']['Message'])
+ module.fail_json_aws(e)
+ except BotoCoreError as e:
+ module.fail_json_aws(e)
+ except PolicyError as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role.py
new file mode 100644
index 00000000..9a2eaca8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_role
+version_added: 1.0.0
+short_description: Manage AWS IAM roles
+description:
+ - Manage AWS IAM roles.
+author: "Rob White (@wimnat)"
+options:
+ path:
+ description:
+ - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+ default: "/"
+ type: str
+ name:
+ description:
+ - The name of the role to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Provides a description of the role.
+ type: str
+ boundary:
+ description:
+ - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+ - Boundaries cannot be set on Instance Profiles; as such, if this option is specified then I(create_instance_profile) must be C(false).
+ - This is intended for roles/users that have permissions to create new IAM objects.
+ - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+ - Requires botocore 1.10.57 or above.
+ aliases: [boundary_policy_arn]
+ type: str
+ assume_role_policy_document:
+ description:
+ - The trust relationship policy document that grants an entity permission to assume the role.
+ - This parameter is required when I(state=present).
+ type: json
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
+ - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ aliases: ['managed_policy']
+ type: list
+ elements: str
+ max_session_duration:
+ description:
+ - The maximum duration (in seconds) of a session when assuming the role.
+ - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+ type: int
+ purge_policies:
+ description:
+ - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ state:
+ description:
+ - Create or remove the IAM role.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ create_instance_profile:
+ description:
+ - Creates an IAM instance profile along with the role.
+ default: true
+ type: bool
+ delete_instance_profile:
+ description:
+ - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
+ profile created with the same I(name) as the role.
+ - Only applies when I(state=absent).
+ default: false
+ type: bool
+ tags:
+ description:
+ - Tag dict to apply to the role.
+ - Requires botocore 1.12.46 or above.
+ type: dict
+ purge_tags:
+ description:
+ - Remove tags not listed in I(tags) when tags is specified.
+ default: true
+ type: bool
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a role with description and tags
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ description: This is My New Role
+ tags:
+ env: dev
+
+- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+- name: Keep the role created above but remove all managed policies
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies: []
+
+- name: Delete the role
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
+ state: absent
+
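+# A hedged sketch: attaching a permissions boundary. The boundary option
+# requires create_instance_profile=false (see the option notes above); the
+# policy ARN here is illustrative.
+- name: Create a role with a permissions boundary
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ boundary: arn:aws:iam::123456789012:policy/boundary-policy
+ create_instance_profile: false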
+'''
+RETURN = r'''
+iam_role:
+ description: dictionary containing the IAM Role data
+ returned: success
+ type: complex
+ contains:
+ path:
+ description: the path to the role
+ type: str
+ returned: always
+ sample: /
+ role_name:
+ description: the friendly name that identifies the role
+ type: str
+ returned: always
+ sample: myrole
+ role_id:
+ description: the stable and unique string identifying the role
+ type: str
+ returned: always
+ sample: ABCDEFF4EZ4ABCDEFV4ZC
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the role
+ type: str
+ returned: always
+ sample: "arn:aws:iam::1234567890:role/mynewrole"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the role was created
+ type: str
+ returned: always
+ sample: "2016-08-14T04:36:28+00:00"
+ assume_role_policy_document:
+ description: the policy that grants an entity permission to assume the role
+ type: str
+ returned: always
+ sample: {
+ 'statement': [
+ {
+ 'action': 'sts:AssumeRole',
+ 'effect': 'Allow',
+ 'principal': {
+ 'service': 'ec2.amazonaws.com'
+ },
+ 'sid': ''
+ }
+ ],
+ 'version': '2012-10-17'
+ }
+ attached_policies:
+ description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
+ type: list
+ returned: always
+ sample: [
+ {
+ 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
+ 'policy_name': 'PowerUserAccess'
+ }
+ ]
+ tags:
+ description: role tags
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+import json
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
+ ansible_dict_to_boto3_tag_list,
+ boto3_tag_list_to_ansible_dict,
+ compare_aws_tags,
+ )
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
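+ # Returns True when the two policy documents are semantically equivalent.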
+ return not compare_policies(current_policy_doc, json.loads(new_policy_doc))
+
+
+@AWSRetry.jittered_backoff()
+def _list_policies(connection):
+ paginator = connection.get_paginator('list_policies')
+ return paginator.paginate().build_full_result()['Policies']
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+ if all(policy.startswith('arn:') for policy in policy_names):
+ return policy_names
+ allpolicies = {}
+ policies = _list_policies(connection)
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json_aws(e, msg="Couldn't find policy")
+
+
+def attach_policies(connection, module, policies_to_attach, params):
+ changed = False
+ for policy_arn in policies_to_attach:
+ try:
+ if not module.check_mode:
+ connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName']))
+ changed = True
+ return changed
+
+
+def remove_policies(connection, module, policies_to_remove, params):
+ changed = False
+ for policy in policies_to_remove:
+ try:
+ if not module.check_mode:
+ connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName']))
+ changed = True
+ return changed
+
+
+def generate_create_params(module):
+ params = dict()
+ params['Path'] = module.params.get('path')
+ params['RoleName'] = module.params.get('name')
+ params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
+ if module.params.get('description') is not None:
+ params['Description'] = module.params.get('description')
+ if module.params.get('max_session_duration') is not None:
+ params['MaxSessionDuration'] = module.params.get('max_session_duration')
+ if module.params.get('boundary') is not None:
+ params['PermissionsBoundary'] = module.params.get('boundary')
+ if module.params.get('tags') is not None:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+
+ return params
+
+
+def create_basic_role(connection, module, params):
+ """
+ Perform the Role creation.
+ Assumes tests for the role existing have already been performed.
+ """
+
+ try:
+ if not module.check_mode:
+ role = connection.create_role(aws_retry=True, **params)
+ # 'Description' is documented as key of the role returned by create_role
+ # but appears to be an AWS bug (the value is not returned using the AWS CLI either).
+ # Get the role after creating it.
+ role = get_role_with_backoff(connection, module, params['RoleName'])
+ else:
+ role = {'MadeInCheckMode': True}
+ role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create role")
+
+ return role
+
+
+def update_role_assumed_policy(connection, module, params, role):
+ # Check Assumed Policy document
+ if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_assume_role_policy(
+ RoleName=params['RoleName'],
+ PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])),
+ aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_description(connection, module, params, role):
+ # Check Description update
+ if params.get('Description') is None:
+ return False
+ if role.get('Description') == params['Description']:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_max_session_duration(connection, module, params, role):
+ # Check MaxSessionDuration update
+ if params.get('MaxSessionDuration') is None:
+ return False
+ if role.get('MaxSessionDuration') == params['MaxSessionDuration']:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_permissions_boundary(connection, module, params, role):
+ # Check PermissionsBoundary
+ if params.get('PermissionsBoundary') is None:
+ return False
+ if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''):
+ return False
+
+ if module.check_mode:
+ return True
+
+ if params.get('PermissionsBoundary') == '':
+ try:
+ connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
+ else:
+ try:
+ connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
+ # Check Managed Policies
+ if managed_policies is None:
+ return False
+
+ # If we're manipulating a fake role
+ if role.get('MadeInCheckMode', False):
+ role['AttachedPolicies'] = [{'PolicyArn': arn, 'PolicyName': arn.split(':')[5]} for arn in managed_policies]
+ return True
+
+ # Get list of current attached managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
+ current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
+
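+ # The documented sentinel managed_policies=[None] means "detach everything".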
+ if len(managed_policies) == 1 and managed_policies[0] is None:
+ managed_policies = []
+
+ policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
+ policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
+
+ changed = False
+
+ if purge_policies:
+ changed |= remove_policies(connection, module, policies_to_remove, params)
+
+ changed |= attach_policies(connection, module, policies_to_attach, params)
+
+ return changed
+
+
+def create_or_update_role(connection, module):
+
+ params = generate_create_params(module)
+ role_name = params['RoleName']
+ create_instance_profile = module.params.get('create_instance_profile')
+ purge_policies = module.params.get('purge_policies')
+ if purge_policies is None:
+ purge_policies = True
+ managed_policies = module.params.get('managed_policies')
+ if managed_policies:
+ # Attempt to list the policies early so we don't leave things behind if we can't find them.
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ changed = False
+
+ # Get role
+ role = get_role(connection, module, role_name)
+
+ # If role is None, create it
+ if role is None:
+ role = create_basic_role(connection, module, params)
+ changed = True
+ else:
+ changed |= update_role_tags(connection, module, params, role)
+ changed |= update_role_assumed_policy(connection, module, params, role)
+ changed |= update_role_description(connection, module, params, role)
+ changed |= update_role_max_session_duration(connection, module, params, role)
+ changed |= update_role_permissions_boundary(connection, module, params, role)
+
+ if create_instance_profile:
+ changed |= create_instance_profiles(connection, module, params, role)
+
+ changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)
+
+ # Get the role again
+ if not role.get('MadeInCheckMode', False):
+ role = get_role(connection, module, params['RoleName'])
+ role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
+ role['tags'] = get_role_tags(connection, module)
+
+ snake_role = camel_dict_to_snake_dict(role, ignore_list=['tags'])
+ module.exit_json(changed=changed, iam_role=snake_role, **snake_role)
+
+
+def create_instance_profiles(connection, module, params, role):
+
+ if role.get('MadeInCheckMode', False):
+ return False
+
+ # Fetch existing Profiles
+ try:
+ instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
+
+ # Profile already exists
+ if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
+ return False
+
+ if module.check_mode:
+ return True
+
+ # Make sure an instance profile is created
+ try:
+ connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True)
+ except ClientError as e:
+ # If the profile already exists, no problem, move on.
+ # Implies someone's changing things at the same time...
+ if e.response['Error']['Code'] == 'EntityAlreadyExists':
+ return False
+ else:
+ module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
+
+ # And attach the role to the profile
+ try:
+ connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName']))
+
+ return True
+
+
+def remove_instance_profiles(connection, module, role_params, role):
+ role_name = module.params.get('name')
+ delete_profiles = module.params.get("delete_instance_profile")
+
+ try:
+ instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
+
+ # Remove the role from the instance profile(s)
+ for profile in instance_profiles:
+ profile_name = profile['InstanceProfileName']
+ try:
+ if not module.check_mode:
+ connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
+ if profile_name == role_name:
+ if delete_profiles:
+ try:
+ connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
+
+
+def destroy_role(connection, module):
+
+ role_name = module.params.get('name')
+ role = get_role(connection, module, role_name)
+ role_params = dict()
+ role_params['RoleName'] = role_name
+ boundary_params = dict(role_params)
+ boundary_params['PermissionsBoundary'] = ''
+
+ if role is None:
+ module.exit_json(changed=False)
+
+ # Before we try to delete the role we need to remove any
+ # - attached instance profiles
+ # - attached managed policies
+ # - permissions boundary
+ remove_instance_profiles(connection, module, role_params, role)
+ update_managed_policies(connection, module, role_params, role, [], True)
+ update_role_permissions_boundary(connection, module, boundary_params, role)
+
+ try:
+ if not module.check_mode:
+ connection.delete_role(aws_retry=True, **role_params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete role")
+
+ module.exit_json(changed=True)
+
+
+def get_role_with_backoff(connection, module, name):
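+ # Also retry on NoSuchEntity: a role fetched straight after creation may
+ # not be visible yet because IAM is eventually consistent.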
+ try:
+ return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_role(connection, module, name):
+ try:
+ return connection.get_role(RoleName=name, aws_retry=True)['Role']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_attached_policy_list(connection, module, name):
+ try:
+ return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
+
+
+def get_role_tags(connection, module):
+ role_name = module.params.get('name')
+ if not hasattr(connection, 'list_role_tags'):
+ return {}
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
+
+
+def update_role_tags(connection, module, params, role):
+ new_tags = params.get('Tags')
+ if new_tags is None:
+ return False
+ new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+
+ role_name = module.params.get('name')
+ purge_tags = module.params.get('purge_tags')
+
+ try:
+ existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (ClientError, KeyError):
+ existing_tags = {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ try:
+ if tags_to_remove:
+ connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+ connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
+
+ changed = bool(tags_to_add) or bool(tags_to_remove)
+ return changed
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ path=dict(type='str', default="/"),
+ assume_role_policy_document=dict(type='json'),
+ managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'),
+ max_session_duration=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ description=dict(type='str'),
+ boundary=dict(type='str', aliases=['boundary_policy_arn']),
+ create_instance_profile=dict(type='bool', default=True),
+ delete_instance_profile=dict(type='bool', default=False),
+ purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['assume_role_policy_document'])],
+ supports_check_mode=True)
+
+ if module.params.get('purge_policies') is None:
+ module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
+ ' To maintain the existing behaviour explicitly set purge_policies=true', date='2022-06-01', collection_name='community.aws')
+
+ if module.params.get('boundary'):
+ if module.params.get('create_instance_profile'):
+ module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
+ if not module.params.get('boundary').startswith('arn:aws:iam'):
+ module.fail_json(msg="Boundary policy must be an ARN")
+ if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
+ module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
+ "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
+ if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
+ module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
+ "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
+ if module.params.get('max_session_duration'):
+ max_session_duration = module.params.get('max_session_duration')
+ if max_session_duration < 3600 or max_session_duration > 43200:
+ module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
+ if module.params.get('path'):
+ path = module.params.get('path')
+ if not path.endswith('/') or not path.startswith('/'):
+ module.fail_json(msg="path must begin and end with /")
+
+ connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_role(connection, module)
+ else:
+ destroy_role(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_facts.py
new file mode 100644
index 00000000..95eabdb9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_facts.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_role_info
+version_added: 1.0.0
+short_description: Gather information on IAM roles
+description:
+ - Gathers information about IAM roles.
+ - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author:
+ - "Will Thames (@willthames)"
+options:
+ name:
+ description:
+ - Name of a role to search for.
+ - Mutually exclusive with I(path_prefix).
+ aliases:
+ - role_name
+ type: str
+ path_prefix:
+ description:
+ - Prefix of the role path to restrict the IAM role search to.
+ - Mutually exclusive with I(name).
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: find all existing IAM roles
+ community.aws.iam_role_info:
+ register: result
+
+- name: describe a single role
+ community.aws.iam_role_info:
+ name: MyIAMRole
+
+- name: describe all roles matching a path prefix
+ community.aws.iam_role_info:
+ path_prefix: /application/path
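+
+# A hedged usage sketch based on the documented return values.
+- name: describe a role and register the result
+ community.aws.iam_role_info:
+ name: MyIAMRole
+ register: role_info
+
+- name: show the ARN of the first matching role
+ ansible.builtin.debug:
+ var: role_info.iam_roles[0].arn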
+'''
+
+RETURN = '''
+iam_roles:
+ description: List of IAM roles
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for IAM role.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:role/AnsibleTestRole
+ assume_role_policy_document:
+ description: Policy Document describing what can assume the role.
+ returned: always
+ type: str
+ create_date:
+ description: Date IAM role was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ inline_policies:
+ description: List of names of inline policies.
+ returned: always
+ type: list
+ sample: []
+ managed_policies:
+ description: List of attached managed policies.
+ returned: always
+ type: complex
+ contains:
+ policy_arn:
+ description: Amazon Resource Name for the policy.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
+ policy_name:
+ description: Name of managed policy.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ instance_profiles:
+ description: List of attached instance profiles.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for the instance profile.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
+ create_date:
+ description: Date instance profile was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ instance_profile_id:
+ description: Amazon Identifier for the instance profile.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ instance_profile_name:
+ description: Name of instance profile.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ path:
+ description: Path of instance profile.
+ returned: always
+ type: str
+ sample: /
+ roles:
+ description: List of roles associated with this instance profile.
+ returned: always
+ type: list
+ sample: []
+ path:
+ description: Path of role.
+ returned: always
+ type: str
+ sample: /
+ role_id:
+ description: Amazon Identifier for the role.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ role_name:
+ description: Name of the role.
+ returned: always
+ type: str
+ sample: AnsibleTestRole
+ tags:
+ description: Role tags.
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_roles_with_backoff(client, **kwargs):
+ paginator = client.get_paginator('list_roles')
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames']
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_attached_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_attached_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies']
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_instance_profiles_for_role')
+ return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles']
+
+
+def describe_iam_role(module, client, role):
+ name = role['RoleName']
+ try:
+ role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name)
+ try:
+ role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name)
+ try:
+ role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name)
+ try:
+ role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags'])
+ del role['Tags']
+ except KeyError:
+ role['tags'] = {}
+ return role
+
+
+def describe_iam_roles(module, client):
+ name = module.params['name']
+ path_prefix = module.params['path_prefix']
+ if name:
+ try:
+ roles = [client.get_role(RoleName=name)['Role']]
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return []
+ else:
+ module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+ else:
+ params = dict()
+ if path_prefix:
+ if not path_prefix.startswith('/'):
+ path_prefix = '/' + path_prefix
+ if not path_prefix.endswith('/'):
+ path_prefix = path_prefix + '/'
+ params['PathPrefix'] = path_prefix
+ try:
+ roles = list_iam_roles_with_backoff(client, **params)['Roles']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list IAM roles")
+ return [camel_dict_to_snake_dict(describe_iam_role(module, client, role), ignore_list=['tags']) for role in roles]
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ name=dict(aliases=['role_name']),
+ path_prefix=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['name', 'path_prefix']])
+ if module._name == 'iam_role_facts':
+ module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('iam')
+
+ module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_info.py
new file mode 100644
index 00000000..95eabdb9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_role_info.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_role_info
+version_added: 1.0.0
+short_description: Gather information on IAM roles
+description:
+ - Gathers information about IAM roles.
+ - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author:
+ - "Will Thames (@willthames)"
+options:
+ name:
+ description:
+ - Name of a role to search for.
+ - Mutually exclusive with I(path_prefix).
+ aliases:
+ - role_name
+ type: str
+ path_prefix:
+ description:
+ - Prefix of the role path to restrict the IAM role search to.
+ - Mutually exclusive with I(name).
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: find all existing IAM roles
+ community.aws.iam_role_info:
+ register: result
+
+- name: describe a single role
+ community.aws.iam_role_info:
+ name: MyIAMRole
+
+- name: describe all roles matching a path prefix
+ community.aws.iam_role_info:
+ path_prefix: /application/path
+'''
+
+RETURN = '''
+iam_roles:
+ description: List of IAM roles
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for IAM role.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:role/AnsibleTestRole
+ assume_role_policy_document:
+ description: Policy Document describing what can assume the role.
+ returned: always
+ type: str
+ create_date:
+ description: Date IAM role was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ inline_policies:
+ description: List of names of inline policies.
+ returned: always
+ type: list
+ sample: []
+ managed_policies:
+ description: List of attached managed policies.
+ returned: always
+ type: complex
+ contains:
+ policy_arn:
+ description: Amazon Resource Name for the policy.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
+ policy_name:
+ description: Name of managed policy.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ instance_profiles:
+ description: List of attached instance profiles.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for the instance profile.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
+ create_date:
+ description: Date instance profile was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ instance_profile_id:
+ description: Amazon Identifier for the instance profile.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ instance_profile_name:
+ description: Name of instance profile.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ path:
+ description: Path of instance profile.
+ returned: always
+ type: str
+ sample: /
+ roles:
+ description: List of roles associated with this instance profile.
+ returned: always
+ type: list
+ sample: []
+ path:
+ description: Path of role.
+ returned: always
+ type: str
+ sample: /
+ role_id:
+ description: Amazon Identifier for the role.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ role_name:
+ description: Name of the role.
+ returned: always
+ type: str
+ sample: AnsibleTestRole
+ tags:
+ description: Role tags.
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_roles_with_backoff(client, **kwargs):
+ paginator = client.get_paginator('list_roles')
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames']
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_attached_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_attached_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies']
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_instance_profiles_for_role')
+ return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles']
+
+
+def describe_iam_role(module, client, role):
+ name = role['RoleName']
+ try:
+ role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name)
+ try:
+ role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name)
+ try:
+ role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name)
+ try:
+ role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags'])
+ del role['Tags']
+ except KeyError:
+ role['tags'] = {}
+ return role
+
+
+def describe_iam_roles(module, client):
+ name = module.params['name']
+ path_prefix = module.params['path_prefix']
+ if name:
+ try:
+ roles = [client.get_role(RoleName=name)['Role']]
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return []
+ else:
+ module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+ else:
+ params = dict()
+ if path_prefix:
+ if not path_prefix.startswith('/'):
+ path_prefix = '/' + path_prefix
+ if not path_prefix.endswith('/'):
+ path_prefix = path_prefix + '/'
+ params['PathPrefix'] = path_prefix
+ try:
+ roles = list_iam_roles_with_backoff(client, **params)['Roles']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list IAM roles")
+ return [camel_dict_to_snake_dict(describe_iam_role(module, client, role), ignore_list=['tags']) for role in roles]
+
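+# Illustrative sketch (hypothetical helper, not part of the module): the
+# path_prefix normalisation above guarantees a leading and trailing slash
+# before calling list_roles, so the following inputs are equivalent:
+#
+#   >>> def normalise(path_prefix):
+#   ...     if not path_prefix.startswith('/'):
+#   ...         path_prefix = '/' + path_prefix
+#   ...     if not path_prefix.endswith('/'):
+#   ...         path_prefix = path_prefix + '/'
+#   ...     return path_prefix
+#   >>> normalise('application/path')
+#   '/application/path/'
+#   >>> normalise('/application/path/')
+#   '/application/path/'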
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ name=dict(aliases=['role_name']),
+ path_prefix=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['name', 'path_prefix']])
+ if module._name == 'iam_role_facts':
+ module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", date='2021-12-01', collection_name='community.aws')
+
+ client = module.client('iam')
+
+ module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
new file mode 100644
index 00000000..214cbe74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_saml_federation
+version_added: 1.0.0
+short_description: Maintain IAM SAML federation configuration.
+requirements:
+ - boto3
+description:
+ - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata).
+options:
+ name:
+ description:
+ - The name of the provider to create.
+ required: true
+ type: str
+ saml_metadata_document:
+ description:
+ - The XML document generated by an identity provider (IdP) that supports SAML 2.0.
+ type: str
+ state:
+ description:
+ - Whether to create or delete the identity provider. If C(present) is specified, it will attempt to update the identity provider matching the name field.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author:
+ - Tony (@axc450)
+ - Aidan Rowe (@aidan-)
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# It is assumed that their matching environment variables are set.
+# Creates a new iam saml identity provider if not present
+- name: saml provider
+ community.aws.iam_saml_federation:
+ name: example1
+ # the > below opens an indented block, so no escaping/quoting is needed when in the indentation level under this key
+ saml_metadata_document: >
+ <?xml version="1.0"?>...
+ <md:EntityDescriptor
+# Creates a new iam saml identity provider if not present
+- name: saml provider
+ community.aws.iam_saml_federation:
+ name: example2
+ saml_metadata_document: "{{ item }}"
+ with_file: /path/to/idp/metadata.xml
+# Removes iam saml identity provider
+- name: remove saml provider
+ community.aws.iam_saml_federation:
+ name: example3
+ state: absent
+'''
+
+RETURN = '''
+saml_provider:
+ description: Details of the SAML Identity Provider that was created/modified.
+ type: complex
+ returned: present
+ contains:
+ arn:
+ description: The ARN of the identity provider.
+ type: str
+ returned: present
+ sample: "arn:aws:iam::123456789012:saml-provider/my_saml_provider"
+ metadata_document:
+ description: The XML metadata document that includes information about an identity provider.
+ type: str
+ returned: present
+ create_date:
+ description: The date and time when the SAML provider was created in ISO 8601 date-time format.
+ type: str
+ returned: present
+ sample: "2017-02-08T04:36:28+00:00"
+ expire_date:
+ description: The expiration date and time for the SAML provider in ISO 8601 date-time format.
+ type: str
+ returned: present
+ sample: "2017-02-08T04:36:28+00:00"
+'''
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class SAMLProviderManager:
+ """Handles SAML Identity Provider configuration"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.conn = module.client('iam')
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Unknown boto error")
+
+ # use retry decorator for boto3 calls
+ @AWSRetry.backoff(tries=3, delay=5)
+ def _list_saml_providers(self):
+ return self.conn.list_saml_providers()
+
+ @AWSRetry.backoff(tries=3, delay=5)
+ def _get_saml_provider(self, arn):
+ return self.conn.get_saml_provider(SAMLProviderArn=arn)
+
+ @AWSRetry.backoff(tries=3, delay=5)
+ def _update_saml_provider(self, arn, metadata):
+ return self.conn.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata)
+
+ @AWSRetry.backoff(tries=3, delay=5)
+ def _create_saml_provider(self, metadata, name):
+ return self.conn.create_saml_provider(SAMLMetadataDocument=metadata, Name=name)
+
+ @AWSRetry.backoff(tries=3, delay=5)
+ def _delete_saml_provider(self, arn):
+ return self.conn.delete_saml_provider(SAMLProviderArn=arn)
+
+ def _get_provider_arn(self, name):
+ providers = self._list_saml_providers()
+ for p in providers['SAMLProviderList']:
+ provider_name = p['Arn'].split('/', 1)[1]
+ if name == provider_name:
+ return p['Arn']
+
+ return None
+
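+ # Illustrative sketch (hypothetical ARN): _get_provider_arn matches on the
+ # provider name embedded in the ARN, so lookups are account-agnostic:
+ #
+ #   >>> arn = 'arn:aws:iam::123456789012:saml-provider/my_saml_provider'
+ #   >>> arn.split('/', 1)[1]
+ #   'my_saml_provider'
+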
+ def create_or_update_saml_provider(self, name, metadata):
+ if not metadata:
+ self.module.fail_json(msg="saml_metadata_document must be defined for present state")
+
+ res = {'changed': False}
+ try:
+ arn = self._get_provider_arn(name)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+
+ if arn: # see if metadata needs updating
+ try:
+ resp = self._get_saml_provider(arn)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name))
+
+ if metadata.strip() != resp['SAMLMetadataDocument'].strip():
+ # provider needs updating
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ resp = self._update_saml_provider(arn, metadata)
+ res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name))
+
+ else: # create
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ resp = self._create_saml_provider(metadata, name)
+ res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name))
+
+ self.module.exit_json(**res)
+
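+ # Illustrative sketch: the update path above is idempotent because metadata
+ # documents are compared after stripping surrounding whitespace:
+ #
+ #   >>> '<md:EntityDescriptor/>\n'.strip() != '<md:EntityDescriptor/>'.strip()
+ #   False
+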
+ def delete_saml_provider(self, name):
+ res = {'changed': False}
+ try:
+ arn = self._get_provider_arn(name)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+
+ if arn: # delete
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ self._delete_saml_provider(arn)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name))
+
+ self.module.exit_json(**res)
+
+ def _build_res(self, arn):
+ saml_provider = self._get_saml_provider(arn)
+ return {
+ "arn": arn,
+ "metadata_document": saml_provider["SAMLMetadataDocument"],
+ "create_date": saml_provider["CreateDate"].isoformat(),
+ "expire_date": saml_provider["ValidUntil"].isoformat()
+ }
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ saml_metadata_document=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['saml_metadata_document'])]
+ )
+
+ name = module.params['name']
+ state = module.params.get('state')
+ saml_metadata_document = module.params.get('saml_metadata_document')
+
+ sp_man = SAMLProviderManager(module)
+
+ if state == 'present':
+ sp_man.create_or_update_saml_provider(name, saml_metadata_document)
+ elif state == 'absent':
+ sp_man.delete_saml_provider(name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_facts.py
new file mode 100644
index 00000000..6e371856
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_facts.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_info
+version_added: 1.0.0
+short_description: Retrieve information about a server certificate
+description:
+ - Retrieve the attributes of a server certificate.
+ - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change.
+author: "Allen Sanabria (@linuxdynasty)"
+requirements: [boto3, botocore]
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Retrieve server certificate
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+
+- name: Fail if the server certificate name was not found
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+ failed_when: server_cert.results | length == 0
+'''
+
+RETURN = '''
+server_certificate_id:
+ description: The 21-character certificate ID.
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+ description: The PEM-encoded body of the certificate.
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import boto3
+ import botocore
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_server_certs(iam, name=None):
+ """Retrieve the attributes of a server certificate if it exists or all certs.
+ Args:
+ iam (botocore.client.IAM): The boto3 iam instance.
+
+ Kwargs:
+ name (str): The name of the server certificate.
+
+ Basic Usage:
+ >>> import boto3
+ >>> iam = boto3.client('iam')
+ >>> name = "server-cert-name"
+ >>> results = get_server_certs(iam, name)
+ {
+ "upload_date": "2015-04-25T00:36:40+00:00",
+ "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+ "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+ "server_certificate_name": "server-cert-name",
+ "expiration": "2017-06-15T12:00:00+00:00",
+ "path": "/",
+ "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+ }
+ """
+ results = dict()
+ try:
+ if name:
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ else:
+ server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+ for server_cert in server_certs:
+ if not name:
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+ cert_md = server_cert['ServerCertificateMetadata']
+ results[cert_md['ServerCertificateName']] = {
+ 'certificate_body': server_cert['CertificateBody'],
+ 'server_certificate_id': cert_md['ServerCertificateId'],
+ 'server_certificate_name': cert_md['ServerCertificateName'],
+ 'arn': cert_md['Arn'],
+ 'path': cert_md['Path'],
+ 'expiration': cert_md['Expiration'].isoformat(),
+ 'upload_date': cert_md['UploadDate'].isoformat(),
+ }
+
+ except botocore.exceptions.ClientError:
+ pass
+
+ return results
+
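+# Illustrative sketch (hypothetical certificate name): get_server_certs keys
+# its results by certificate name, so a specific certificate can be looked up
+# directly from the returned dict:
+#
+#   >>> certs = get_server_certs(iam, name='production-cert')
+#   >>> certs['production-cert']['path']
+#   '/'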
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,)
+ if module._name == 'iam_server_certificate_facts':
+ module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ iam = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ cert_name = module.params.get('name')
+ results = get_server_certs(iam, cert_name)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
new file mode 100644
index 00000000..6e371856
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_info
+version_added: 1.0.0
+short_description: Retrieve information about a server certificate
+description:
+ - Retrieve the attributes of a server certificate.
+ - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change.
+author: "Allen Sanabria (@linuxdynasty)"
+requirements: [boto3, botocore]
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Retrieve server certificate
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+
+- name: Fail if the server certificate name was not found
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+ failed_when: server_cert.results | length == 0
+'''
+
+RETURN = '''
+server_certificate_id:
+ description: The 21-character certificate ID.
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+ description: The PEM-encoded body of the certificate.
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import boto3
+ import botocore
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_server_certs(iam, name=None):
+ """Retrieve the attributes of a server certificate if it exists or all certs.
+ Args:
+ iam (botocore.client.IAM): The boto3 iam instance.
+
+ Kwargs:
+ name (str): The name of the server certificate.
+
+ Basic Usage:
+ >>> import boto3
+ >>> iam = boto3.client('iam')
+ >>> name = "server-cert-name"
+ >>> results = get_server_certs(iam, name)
+ {
+ "upload_date": "2015-04-25T00:36:40+00:00",
+ "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+ "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+ "server_certificate_name": "server-cert-name",
+ "expiration": "2017-06-15T12:00:00+00:00",
+ "path": "/",
+ "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+ }
+ """
+ results = dict()
+ try:
+ if name:
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ else:
+ server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+ for server_cert in server_certs:
+ if not name:
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+ cert_md = server_cert['ServerCertificateMetadata']
+ results[cert_md['ServerCertificateName']] = {
+ 'certificate_body': server_cert['CertificateBody'],
+ 'server_certificate_id': cert_md['ServerCertificateId'],
+ 'server_certificate_name': cert_md['ServerCertificateName'],
+ 'arn': cert_md['Arn'],
+ 'path': cert_md['Path'],
+ 'expiration': cert_md['Expiration'].isoformat(),
+ 'upload_date': cert_md['UploadDate'].isoformat(),
+ }
+
+ except botocore.exceptions.ClientError:
+ pass
+
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,)
+ if module._name == 'iam_server_certificate_facts':
+ module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'",
+ date='2021-12-01', collection_name='community.aws')
+
+ try:
+ iam = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ cert_name = module.params.get('name')
+ results = get_server_certs(iam, cert_name)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user.py
new file mode 100644
index 00000000..6b8efcda
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_user
+version_added: 1.0.0
+short_description: Manage AWS IAM users
+description:
+ - Manage AWS IAM users.
+author: Josh Souza (@joshsouza)
+options:
+ name:
+ description:
+ - The name of the user to create.
+ required: true
+ type: str
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or friendly names to attach to the user.
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ required: false
+ type: list
+ elements: str
+ aliases: ['managed_policy']
+ state:
+ description:
+ - Create or remove the IAM user.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ purge_policies:
+ description:
+ - When I(purge_policies=true), any managed policies not listed in I(managed_policies) will be detached.
+ required: false
+ default: false
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Note: This module does not manage the groups a user belongs to.
+# Group membership should be managed through the `iam_group` module,
+# which operates on groups and their members directly.
+
+- name: Create a user
+ community.aws.iam_user:
+ name: testuser1
+ state: present
+
+- name: Create a user and attach a managed policy using its ARN
+ community.aws.iam_user:
+ name: testuser1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ state: present
+
+- name: Remove all managed policies from an existing user with an empty list
+ community.aws.iam_user:
+ name: testuser1
+ state: present
+ purge_policies: true
+
+- name: Delete the user
+ community.aws.iam_user:
+ name: testuser1
+ state: absent
+
+'''
+RETURN = r'''
+user:
+ description: dictionary containing all the user information
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the user
+ type: str
+ sample: "arn:aws:iam::1234567890:user/testuser1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the user was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ user_id:
+ description: the stable and unique string identifying the user
+ type: str
+ sample: AGPAIDBWE12NSFINE55TM
+ user_name:
+ description: the friendly name that identifies the user
+ type: str
+ sample: testuser1
+ path:
+ description: the path to the user
+ type: str
+ sample: /
+'''
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+import traceback
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def compare_attached_policies(current_attached_policies, new_attached_policies):
+
+ # If new_attached_policies is None it means we want to remove all policies
+ if len(current_attached_policies) > 0 and new_attached_policies is None:
+ return False
+
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+ if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
+ return True
+ else:
+ return False
+
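+# Illustrative sketch (hypothetical ARNs, plain Python): the comparison above
+# is order-insensitive because it relies on set symmetric difference; an empty
+# symmetric difference means attached and requested policies already match:
+#
+#   >>> current = ['arn:aws:iam::aws:policy/A', 'arn:aws:iam::aws:policy/B']
+#   >>> desired = ['arn:aws:iam::aws:policy/B', 'arn:aws:iam::aws:policy/A']
+#   >>> bool(set(current).symmetric_difference(set(desired)))
+#   False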
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
+ # Look for any policy in 'policy_names' that does not begin with 'arn:'.
+ # If every entry is already an ARN, short circuit; otherwise translate
+ # each friendly name to its full ARN.
+ if all(policy.startswith('arn:') for policy in policy_names if policy is not None):
+ return policy_names
+ allpolicies = {}
+ paginator = connection.get_paginator('list_policies')
+ policies = paginator.paginate().build_full_result()['Policies']
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json(msg="Couldn't find policy: " + str(e))
+
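+# Illustrative sketch (hypothetical policy data): the lookup table built above
+# maps both friendly names and ARNs back to ARNs, so a mixed input list
+# resolves in a single pass:
+#
+#   >>> allpolicies = {'AmazonSNSFullAccess': 'arn:aws:iam::aws:policy/AmazonSNSFullAccess',
+#   ...                'arn:aws:iam::aws:policy/AmazonSNSFullAccess': 'arn:aws:iam::aws:policy/AmazonSNSFullAccess'}
+#   >>> [allpolicies[p] for p in ['AmazonSNSFullAccess', 'arn:aws:iam::aws:policy/AmazonSNSFullAccess']]
+#   ['arn:aws:iam::aws:policy/AmazonSNSFullAccess', 'arn:aws:iam::aws:policy/AmazonSNSFullAccess']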
+
+def create_or_update_user(connection, module):
+
+ params = dict()
+ params['UserName'] = module.params.get('name')
+ managed_policies = module.params.get('managed_policies')
+ purge_policies = module.params.get('purge_policies')
+ changed = False
+ if managed_policies:
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ # Get user
+ user = get_user(connection, module, params['UserName'])
+
+ # If user is None, create it
+ if user is None:
+ # Check mode means we would create the user
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ connection.create_user(**params)
+ changed = True
+ except ClientError as e:
+ module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except ParamValidationError as e:
+ module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
+
+ # Manage managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
+ if not compare_attached_policies(current_attached_policies, managed_policies):
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+ # When purging, detach any currently attached policy that is not listed in managed_policies
+ if purge_policies:
+ # Detach policies not present
+ for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
+ except ClientError as e:
+ module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
+ policy_arn, params['UserName'], to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except ParamValidationError as e:
+ module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
+ policy_arn, params['UserName'], to_native(e)),
+ exception=traceback.format_exc())
+
+ # If there are policies to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(managed_policies).difference(set(current_attached_policies_arn_list)):
+ changed = True
+ # If there are policies in managed_policies attach each policy
+ if managed_policies != [None] and not module.check_mode:
+ for policy_arn in managed_policies:
+ try:
+ connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
+ except ClientError as e:
+ module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
+ policy_arn, params['UserName'], to_native(e)),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ except ParamValidationError as e:
+ module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
+ policy_arn, params['UserName'], to_native(e)),
+ exception=traceback.format_exc())
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ # Get the user again
+ user = get_user(connection, module, params['UserName'])
+
+ module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
+
+
+def destroy_user(connection, module):
+
+ user_name = module.params.get('name')
+
+ user = get_user(connection, module, user_name)
+ # User is not present
+ if not user:
+ module.exit_json(changed=False)
+
+ # Check mode means we would remove this user
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Remove any attached policies otherwise deletion fails
+ try:
+ for policy in get_attached_policy_list(connection, module, user_name):
+ connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn'])
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
+
+ try:
+ # Remove user's access keys
+ access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"]
+ for access_key in access_keys:
+ connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"])
+
+ # Remove user's login profile (console password)
+ delete_user_login_profile(connection, module, user_name)
+
+ # Remove user's ssh public keys
+ ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"]
+ for ssh_public_key in ssh_public_keys:
+ connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"])
+
+ # Remove user's service specific credentials
+ service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"]
+ for service_specific_credential in service_credentials:
+ connection.delete_service_specific_credential(
+ UserName=user_name,
+ ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"]
+ )
+
+ # Remove user's signing certificates
+ signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"]
+ for signing_certificate in signing_certificates:
+ connection.delete_signing_certificate(
+ UserName=user_name,
+ CertificateId=signing_certificate["CertificateId"]
+ )
+
+ # Remove user's MFA devices
+ mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"]
+ for mfa_device in mfa_devices:
+ connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"])
+
+ # Remove user's inline policies
+ inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"]
+ for policy_name in inline_policies:
+ connection.delete_user_policy(UserName=user_name, PolicyName=policy_name)
+
+ # Remove user's group membership
+ user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"]
+ for group in user_groups:
+ connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"])
+
+ connection.delete_user(UserName=user_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
+
+ module.exit_json(changed=True)
+
+
+def get_user(connection, module, name):
+
+ params = dict()
+ params['UserName'] = name
+
+ try:
+ return connection.get_user(**params)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
+ **camel_dict_to_snake_dict(e.response))
+
+
+def get_attached_policy_list(connection, module, name):
+
+ try:
+ return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
+
+
+def delete_user_login_profile(connection, module, user_name):
+
+ try:
+ return connection.delete_login_profile(UserName=user_name)
+ except ClientError as e:
+ if e.response["Error"]["Code"] == "NoSuchEntity":
+ return None
+ else:
+ module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name))
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('iam')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_user(connection, module)
+ else:
+ destroy_user(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user_info.py
new file mode 100644
index 00000000..8e1856b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/iam_user_info.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_user_info
+version_added: 1.0.0
+short_description: Gather information about IAM users in AWS
+description:
+ - This module can be used to gather information about IAM users in AWS.
+author:
+ - Constantin Bugneac (@Constantin07)
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ name:
+ description:
+ - The name of the IAM user to look for.
+ required: false
+ type: str
+ group:
+ description:
+ - The name of the IAM group to look for users in. Mutually exclusive with C(path).
+ required: false
+ type: str
+ path:
+ description:
+ - The path to the IAM user. Mutually exclusive with C(group).
+ - If specified, all users whose path starts with the provided value are returned.
+ required: false
+ default: '/'
+ type: str
+requirements:
+ - botocore
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Gather facts about "test" user.
+- name: Get IAM user facts
+ community.aws.iam_user_info:
+ name: "test"
+
+# Gather facts about all users in the "dev" group.
+- name: Get IAM user facts
+ community.aws.iam_user_info:
+ group: "dev"
+
+# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
+- name: Get IAM user facts
+ community.aws.iam_user_info:
+ path: "/division_abc/subdivision_xyz/"
+'''
+
+RETURN = r'''
+iam_users:
+ description: List of matching IAM users.
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the ARN of the user
+ returned: if user exists
+ type: str
+ sample: "arn:aws:iam::156360693172:user/dev/test_user"
+ create_date:
+ description: the datetime user was created
+ returned: if user exists
+ type: str
+ sample: "2016-05-24T12:24:59+00:00"
+ password_last_used:
+ description: the last datetime the password was used by user
+ returned: if password was used at least once
+ type: str
+ sample: "2016-05-25T13:39:11+00:00"
+ path:
+ description: the path to user
+ returned: if user exists
+ type: str
+ sample: "/dev/"
+ user_id:
+ description: the unique user id
+ returned: if user exists
+ type: str
+ sample: "AIDUIOOCQKTUGI6QJLGH2"
+ user_name:
+ description: the user name
+ returned: if user exists
+ type: str
+ sample: "test_user"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_users_with_backoff(client, operation, **kwargs):
+ paginator = client.get_paginator(operation)
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+def list_iam_users(connection, module):
+
+ name = module.params.get('name')
+ group = module.params.get('group')
+ path = module.params.get('path')
+
+ params = dict()
+ iam_users = []
+
+ if not group and not path:
+ if name:
+ params['UserName'] = name
+ try:
+ iam_users.append(connection.get_user(**params)['User'])
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
+
+ if group:
+ params['GroupName'] = group
+ try:
+ iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
+ if name:
+ iam_users = [user for user in iam_users if user['UserName'] == name]
+
+ if path and not group:
+ params['PathPrefix'] = path
+ try:
+ iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
+ if name:
+ iam_users = [user for user in iam_users if user['UserName'] == name]
+
+ module.exit_json(iam_users=[camel_dict_to_snake_dict(user) for user in iam_users])
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group=dict(),
+ path=dict(default='/')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['group', 'path']
+ ],
+ supports_check_mode=True
+ )
+
+ connection = module.client('iam')
+
+ list_iam_users(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/kinesis_stream.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
new file mode 100644
index 00000000..d9b3cc0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
@@ -0,0 +1,1408 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kinesis_stream
+version_added: 1.0.0
+short_description: Manage a Kinesis Stream.
+description:
+ - Create or Delete a Kinesis Stream.
+ - Update the retention period of a Kinesis Stream.
+ - Update Tags on a Kinesis Stream.
+ - Enable/disable server side encryption on a Kinesis Stream.
+requirements: [ boto3 ]
+author: Allen Sanabria (@linuxdynasty)
+options:
+ name:
+ description:
+ - The name of the Kinesis Stream you are managing.
+ required: true
+ type: str
+ shards:
+ description:
+ - The number of shards you want to have with this stream.
+ - This is required when I(state=present).
+ type: int
+ retention_period:
+ description:
+ - The length of time (in hours) data records are accessible after they are added to
+ the stream.
+ - The default retention period is 24 hours and can not be less than 24 hours.
+ - The maximum retention period is 168 hours.
+ - The retention period can be modified during any point in time.
+ type: int
+ state:
+ description:
+ - Create or Delete the Kinesis Stream.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ default: 300
+ type: int
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })."
+ aliases: [ "resource_tags" ]
+ type: dict
+ encryption_state:
+ description:
+ - Enable or Disable encryption on the Kinesis Stream.
+ choices: [ 'enabled', 'disabled' ]
+ type: str
+ encryption_type:
+ description:
+ - The type of encryption.
+ - Defaults to C(KMS)
+ choices: ['KMS', 'NONE']
+ type: str
+ key_id:
+ description:
+ - The GUID or alias for the KMS key.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ shards: 10
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ retention_period: 48
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic delete example:
+- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic enable encryption example:
+- name: Encrypt Kinesis Stream test-stream.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: present
+ shards: 1
+ encryption_state: enabled
+ encryption_type: KMS
+ key_id: alias/aws/kinesis
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic disable encryption example:
+- name: Stop encrypting Kinesis Stream test-stream.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: present
+ shards: 1
+ encryption_state: disabled
+ encryption_type: KMS
+ key_id: alias/aws/kinesis
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+'''
+
+RETURN = '''
+stream_name:
+ description: The name of the Kinesis Stream.
+ returned: when state == present.
+ type: str
+ sample: "test-stream"
+stream_arn:
+ description: The amazon resource identifier
+ returned: when state == present.
+ type: str
+ sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
+stream_status:
+ description: The current state of the Kinesis Stream.
+ returned: when state == present.
+ type: str
+ sample: "ACTIVE"
+retention_period_hours:
+ description: Number of hours messages will be kept for a Kinesis Stream.
+ returned: when state == present.
+ type: int
+ sample: 24
+tags:
+ description: Dictionary containing all the tags associated with the Kinesis stream.
+ returned: when state == present.
+ type: dict
+ sample: {
+ "Name": "Splunk",
+ "Env": "development"
+ }
+'''
+
+import re
+import datetime
+import time
+from functools import reduce
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def convert_to_lower(data):
+ """Convert all uppercase keys in dict with lowercase_
+ Args:
+ data (dict): Dictionary with keys that have upper cases in them
+ Example.. FooBar == foo_bar
+ if a val is of type datetime.datetime, it will be converted to
+ the ISO 8601
+
+ Basic Usage:
+ >>> test = {'FooBar': []}
+ >>> test = convert_to_lower(test)
+ {
+ 'foo_bar': []
+ }
+
+ Returns:
+ Dictionary
+ """
+ results = dict()
+ if isinstance(data, dict):
+ for key, val in data.items():
+ key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
+ if key[0] == '_':
+ key = key[1:]
+ if isinstance(val, datetime.datetime):
+ results[key] = val.isoformat()
+ elif isinstance(val, dict):
+ results[key] = convert_to_lower(val)
+ elif isinstance(val, list):
+ converted = list()
+ for item in val:
+ converted.append(convert_to_lower(item))
+ results[key] = converted
+ else:
+ results[key] = val
+ return results
+
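+# Illustrative sketch: the substitution in convert_to_lower prefixes each run
+# of one to three capitals with an underscore before lower-casing, and the
+# function then strips a resulting leading underscore; mirrored in plain Python:
+#
+#   >>> import re
+#   >>> re.sub(r'(([A-Z]{1,3}){1})', r'_\1', 'StreamARN').lower().lstrip('_')
+#   'stream_arn'
+#   >>> re.sub(r'(([A-Z]{1,3}){1})', r'_\1', 'HasMoreShards').lower().lstrip('_')
+#   'has_more_shards'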
+
+def make_tags_in_proper_format(tags):
+ """Take a dictionary of tags and convert them into the AWS Tags format.
+ Args:
+ tags (list): The tags you want applied.
+
+ Basic Usage:
+ >>> tags = [{'Key': 'env', 'Value': 'development'}]
+ >>> make_tags_in_proper_format(tags)
+ {
+ "env": "development",
+ }
+
+ Returns:
+ Dict
+ """
+ formatted_tags = dict()
+ for tag in tags:
+ formatted_tags[tag.get('Key')] = tag.get('Value')
+
+ return formatted_tags
+
+
+def make_tags_in_aws_format(tags):
+ """Take a dictionary of tags and convert them into the AWS Tags format.
+ Args:
+ tags (dict): The tags you want applied.
+
+ Basic Usage:
+ >>> tags = {'env': 'development', 'service': 'web'}
+ >>> make_tags_in_aws_format(tags)
+ [
+ {
+ "Value": "web",
+ "Key": "service"
+ },
+ {
+ "Value": "development",
+ "key": "env"
+ }
+ ]
+
+ Returns:
+ List
+ """
+ formatted_tags = list()
+ for key, val in tags.items():
+ formatted_tags.append({
+ 'Key': key,
+ 'Value': val
+ })
+
+ return formatted_tags
+
+
+def get_tags(client, stream_name, check_mode=False):
+ """Retrieve the tags for a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >> get_tags(client, stream_name)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ try:
+ if not check_mode:
+ results = (
+ client.list_tags_for_stream(**params)['Tags']
+ )
+ else:
+ results = [
+ {
+ 'Key': 'DryRunMode',
+ 'Value': 'true'
+ },
+ ]
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg, results
+
+
+def find_stream(client, stream_name, check_mode=False):
+ """Retrieve a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ has_more_shards = True
+ shards = list()
+ try:
+ if not check_mode:
+ while has_more_shards:
+ results = (
+ client.describe_stream(**params)['StreamDescription']
+ )
+ shards.extend(results.pop('Shards'))
+ has_more_shards = results['HasMoreShards']
+ if has_more_shards:
+ params['ExclusiveStartShardId'] = shards[-1]['ShardId']
+ results['Shards'] = shards
+ num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
+ results['OpenShardsCount'] = len(shards) - num_closed_shards
+ results['ClosedShardsCount'] = num_closed_shards
+ results['ShardsCount'] = len(shards)
+ else:
+ results = {
+ 'OpenShardsCount': 5,
+ 'ClosedShardsCount': 0,
+ 'ShardsCount': 5,
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': stream_name,
+ 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
+ 'StreamStatus': 'ACTIVE',
+ 'EncryptionType': 'NONE'
+ }
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg, results
+
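+# Illustrative sketch (hypothetical shard data): find_stream counts a shard as
+# closed once its sequence number range has an ending value:
+#
+#   >>> shards = [
+#   ...     {'SequenceNumberRange': {'StartingSequenceNumber': '1'}},
+#   ...     {'SequenceNumberRange': {'StartingSequenceNumber': '2', 'EndingSequenceNumber': '9'}},
+#   ... ]
+#   >>> len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
+#   1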
+
+def wait_for_status(client, stream_name, status, wait_timeout=300,
+ check_mode=False):
+ """Wait for the status to change for a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client
+ stream_name (str): The name of the kinesis stream.
+ status (str): The status to wait for.
+ examples. status=available, status=deleted
+
+ Kwargs:
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> wait_for_status(client, stream_name, 'ACTIVE', 300)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ polling_increment_secs = 5
+ wait_timeout = time.time() + wait_timeout
+ status_achieved = False
+ stream = dict()
+ err_msg = ""
+
+ while wait_timeout > time.time():
+ try:
+ find_success, find_msg, stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if check_mode:
+ status_achieved = True
+ break
+
+ elif status != 'DELETING':
+ if find_success and stream:
+ if stream.get('StreamStatus') == status:
+ status_achieved = True
+ break
+
+ else:
+ if not find_success:
+ status_achieved = True
+ break
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ time.sleep(polling_increment_secs)
+
+ if not status_achieved:
+ err_msg = "Wait time out reached, while waiting for results"
+ else:
+ err_msg = "Status {0} achieved successfully".format(status)
+
+ return status_achieved, err_msg, stream
+
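+# Illustrative sketch (hypothetical stream): a typical call polls every five
+# seconds until the requested status is reached or the timeout expires:
+#
+#   >>> client = boto3.client('kinesis')
+#   >>> ok, msg, stream = wait_for_status(client, 'test-stream', 'ACTIVE', wait_timeout=600)
+#   >>> ok, msg
+#   (True, 'Status ACTIVE achieved successfully')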
+
+def tags_action(client, stream_name, tags, action='create', check_mode=False):
+ """Create or delete multiple tags from a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the Kinesis stream.
+ tags (dict): Dictionary of tags to create or delete,
+ e.g. {'env': 'development'}
+
+ Kwargs:
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> tags_action(client, stream_name, tags, action='create')
+ (True, '')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ""
+ params = {'StreamName': stream_name}
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['Tags'] = tags
+ client.add_tags_to_stream(**params)
+ success = True
+ elif action == 'delete':
+ params['TagKeys'] = list(tags)
+ client.remove_tags_from_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
+
+def recreate_tags_from_list(list_of_tags):
+ """Recreate tags from a list of tuples into the Amazon Tag format.
+ Args:
+ list_of_tags (list): List of tuples.
+
+ Basic Usage:
+ >>> list_of_tags = [('Env', 'Development')]
+ >>> recreate_tags_from_list(list_of_tags)
+ [
+ {
+ "Value": "Development",
+ "Key": "Env"
+ }
+ ]
+
+ Returns:
+ List
+ """
+ tags = list()
+ for key_name, key_val in list_of_tags:
+ tags.append(
+ {
+ 'Key': key_name,
+ 'Value': key_val
+ }
+ )
+ return tags
+
+
+def update_tags(client, stream_name, tags, check_mode=False):
+ """Update tags for an amazon resource.
+ Args:
+ resource_id (str): The Amazon resource id.
+ tags (dict): Dictionary of tags you want applied to the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> update_tags(client, stream_name, tags)
+ (True, True, '')
+
+ Return:
+ Tuple (bool, bool, str)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name, check_mode=check_mode)
+ )
+ if current_tags:
+ tags = make_tags_in_aws_format(tags)
+ current_tags_set = (
+ set(
+ reduce(
+ lambda x, y: x + y,
+ [make_tags_in_proper_format(current_tags).items()]
+ )
+ )
+ )
+
+ new_tags_set = (
+ set(
+ reduce(
+ lambda x, y: x + y,
+ [make_tags_in_proper_format(tags).items()]
+ )
+ )
+ )
+ tags_to_delete = list(current_tags_set.difference(new_tags_set))
+ tags_to_update = list(new_tags_set.difference(current_tags_set))
+ if tags_to_delete:
+ tags_to_delete = make_tags_in_proper_format(
+ recreate_tags_from_list(tags_to_delete)
+ )
+ delete_success, delete_msg = (
+ tags_action(
+ client, stream_name, tags_to_delete, action='delete',
+ check_mode=check_mode
+ )
+ )
+ if not delete_success:
+ return delete_success, changed, delete_msg
+ if tags_to_update:
+ tags = make_tags_in_proper_format(
+ recreate_tags_from_list(tags_to_update)
+ )
+ else:
+ return True, changed, 'Tags do not need to be updated'
+
+ if tags:
+ create_success, create_msg = (
+ tags_action(
+ client, stream_name, tags, action='create',
+ check_mode=check_mode
+ )
+ )
+ if create_success:
+ changed = True
+ return create_success, changed, create_msg
+
+ return success, changed, err_msg
+
+
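+# Illustrative sketch (editorial addition, not used by the module): the
+# set-difference reconciliation update_tags() performs, shown on plain dicts
+# with hypothetical values.
+def _sketch_tag_diff(current, desired):
+    """Return (tags_to_delete, tags_to_update) as lists of (key, value) pairs."""
+    current_set = set(current.items())
+    desired_set = set(desired.items())
+    tags_to_delete = list(current_set.difference(desired_set))
+    tags_to_update = list(desired_set.difference(current_set))
+    return tags_to_delete, tags_to_update
+
+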
+def stream_action(client, stream_name, shard_count=1, action='create',
+ timeout=300, check_mode=False):
+ """Create or Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ shard_count (int): Number of shards this stream will use.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> shard_count = 20
+ >>> stream_action(client, stream_name, shard_count, action='create')
+
+ Returns:
+ List (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['ShardCount'] = shard_count
+ client.create_stream(**params)
+ success = True
+ elif action == 'delete':
+ client.delete_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
+
+def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
+ timeout=300, check_mode=False):
+ """Create, Encrypt or Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ shard_count (int): Number of shards this stream will use.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ encryption_type (str): NONE or KMS
+ key_id (str): The GUID or alias for the KMS key
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> shard_count = 20
+ >>> stream_action(client, stream_name, shard_count, action='create', encryption_type='KMS',key_id='alias/aws')
+
+ Returns:
+ List (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'start_encryption':
+ params['EncryptionType'] = encryption_type
+ params['KeyId'] = key_id
+ client.start_stream_encryption(**params)
+ success = True
+ elif action == 'stop_encryption':
+ params['EncryptionType'] = encryption_type
+ params['KeyId'] = key_id
+ client.stop_stream_encryption(**params)
+ success = True
+ else:
+ err_msg = 'Invalid encryption action {0}'.format(action)
+ else:
+ if action == 'start_encryption':
+ success = True
+ elif action == 'stop_encryption':
+ success = True
+ else:
+ err_msg = 'Invalid encryption action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
+
+def retention_action(client, stream_name, retention_period=24,
+ action='increase', check_mode=False):
+ """Increase or Decrease the retention of messages in the Kinesis stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> retention_action(client, stream_name, retention_period, action='increase')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'increase':
+ params['RetentionPeriodHours'] = retention_period
+ client.increase_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period increased successfully to {0}'.format(retention_period)
+ )
+ elif action == 'decrease':
+ params['RetentionPeriodHours'] = retention_period
+ client.decrease_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period decreased successfully to {0}'.format(retention_period)
+ )
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'increase':
+ success = True
+ elif action == 'decrease':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
+
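+# Illustrative sketch (editorial addition, not used by the module): how the
+# update() function below picks the retention_action() direction from the
+# current stream description.
+def _sketch_retention_direction(current_hours, desired_hours):
+    """Return 'increase', 'decrease' or None when no change is needed."""
+    if desired_hours > current_hours:
+        return 'increase'
+    if desired_hours < current_hours:
+        return 'decrease'
+    return None
+
+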
+def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False):
+ """Increase or Decrease the number of shards in the Kinesis stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> number_of_shards = 3
+ >>> update_shard_count(client, stream_name, number_of_shards)
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = True
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name,
+ 'ScalingType': 'UNIFORM_SCALING'
+ }
+ if not check_mode:
+ params['TargetShardCount'] = number_of_shards
+ try:
+ client.update_shard_count(**params)
+ except botocore.exceptions.ClientError as e:
+ return False, str(e)
+
+ return success, err_msg
+
+
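+# Illustrative sketch (editorial addition, not used by the module): at the
+# time of writing, UpdateShardCount with UNIFORM_SCALING rejects targets
+# outside roughly half to double the current open shard count, so a large
+# resize may take several calls. A hypothetical helper for that window:
+def _sketch_clamp_target_shards(current, target):
+    """Clamp a desired shard count to the window one call can reach."""
+    return max(max(1, current // 2), min(target, current * 2))
+
+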
+def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Update an Amazon Kinesis Stream.
+ Args:
+        client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ tags (dict): The tags you want applied.
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+        check_mode (bool): If True, skip the API calls and only report what would change.
+            default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> current_stream = {
+ 'ShardCount': 3,
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test-stream',
+ 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
+ 'StreamStatus': "ACTIVE'
+ }
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> number_of_shards = 10
+ >>> update(client, current_stream, stream_name,
+ number_of_shards, retention_period )
+
+ Returns:
+ Tuple (bool, bool, str)
+ """
+ success = True
+ changed = False
+ err_msg = ''
+ if retention_period:
+ if wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+
+ if current_stream.get('StreamStatus') == 'ACTIVE':
+ retention_changed = False
+ if retention_period > current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period < current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='decrease',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period == current_stream['RetentionPeriodHours']:
+ retention_msg = (
+ 'Retention {0} is the same as {1}'
+ .format(
+ retention_period,
+ current_stream['RetentionPeriodHours']
+ )
+ )
+ success = True
+
+ if retention_changed:
+ success = True
+ changed = True
+
+ err_msg = retention_msg
+ if changed and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+ elif changed and not wait:
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ if current_stream['StreamStatus'] != 'ACTIVE':
+ err_msg = (
+ 'Retention Period for {0} is in the process of updating'
+ .format(stream_name)
+ )
+ return success, changed, err_msg
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ )
+ return success, changed, err_msg
+
+ if current_stream['OpenShardsCount'] != number_of_shards:
+ success, err_msg = (
+ update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
+ )
+
+ if not success:
+ return success, changed, err_msg
+
+ changed = True
+
+ if wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, changed, wait_msg
+ else:
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found and current_stream['StreamStatus'] != 'ACTIVE':
+ err_msg = (
+ 'Number of shards for {0} is in the process of updating'
+ .format(stream_name)
+ )
+ return success, changed, err_msg
+
+    if tags:
+        tag_success, tag_changed, err_msg = (
+            update_tags(client, stream_name, tags, check_mode=check_mode)
+        )
+        if tag_changed:
+            changed = True
+ if wait:
+ success, err_msg, status_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if success and changed:
+ err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
+ elif success and not changed:
+ err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
+
+ return success, changed, err_msg
+
+
+def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Create an Amazon Kinesis Stream.
+ Args:
+        client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ retention_period (int): Can not be less than 24 hours
+ default=None
+ tags (dict): The tags you want applied.
+ default=None
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+        check_mode (bool): If True, skip the API calls and only report what would change.
+            default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> number_of_shards = 10
+ >>> tags = {'env': 'test'}
+ >>> create_stream(client, stream_name, number_of_shards, tags=tags)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+
+ if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+
+ if stream_found and current_stream.get('StreamStatus') != 'DELETING':
+ success, changed, err_msg = update(
+ client, current_stream, stream_name, number_of_shards,
+ retention_period, tags, wait, wait_timeout, check_mode=check_mode
+ )
+ else:
+ create_success, create_msg = (
+ stream_action(
+ client, stream_name, number_of_shards, action='create',
+ check_mode=check_mode
+ )
+ )
+ if not create_success:
+ changed = True
+ err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg)
+ return False, True, err_msg, {}
+ else:
+ changed = True
+ if wait:
+ wait_success, wait_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = (
+ 'Kinesis Stream {0} is in the process of being created'
+ .format(stream_name)
+ )
+ if not wait_success:
+ return wait_success, True, wait_msg, results
+ else:
+ err_msg = (
+ 'Kinesis Stream {0} created successfully'
+ .format(stream_name)
+ )
+
+ if tags:
+ changed, err_msg = (
+ tags_action(
+ client, stream_name, tags, action='create',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if retention_period and current_stream.get('StreamStatus') == 'ACTIVE':
+ changed, err_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ )
+ success = create_success
+ changed = True
+
+ if success:
+ stream_found, stream_msg, results = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name, check_mode=check_mode)
+ )
+ if current_tags and not check_mode:
+ current_tags = make_tags_in_proper_format(current_tags)
+ results['Tags'] = current_tags
+ elif check_mode and tags:
+ results['Tags'] = tags
+ else:
+ results['Tags'] = dict()
+ results = convert_to_lower(results)
+
+ return success, changed, err_msg, results
+
+
+def delete_stream(client, stream_name, wait=False, wait_timeout=300,
+ check_mode=False):
+ """Delete an Amazon Kinesis Stream.
+ Args:
+        client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+        check_mode (bool): If True, skip the API calls and only report what would change.
+            default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> delete_stream(client, stream_name)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ success, err_msg = (
+ stream_action(
+ client, stream_name, action='delete', check_mode=check_mode
+ )
+ )
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'DELETING', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Stream {0} deleted successfully'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Stream {0} is in the process of being deleted'
+ .format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Stream {0} does not exist'.format(stream_name)
+
+ return success, changed, err_msg, results
+
+
+def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
+ wait=False, wait_timeout=300, check_mode=False):
+ """Start encryption on an Amazon Kinesis Stream.
+ Args:
+        client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ encryption_type (str): KMS or NONE
+ key_id (str): KMS key GUID or alias
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+        check_mode (bool): If True, skip the API calls and only report what would change.
+            default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> key_id = 'alias/aws'
+ >>> encryption_type = 'KMS'
+        >>> start_stream_encryption(client, stream_name, encryption_type, key_id)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ success, err_msg = (
+ stream_encryption_action(
+ client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode
+ )
+ )
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
+
+ return success, changed, err_msg, results
+
+
+def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
+ wait=True, wait_timeout=300, check_mode=False):
+ """Stop encryption on an Amazon Kinesis Stream.
+ Args:
+        client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ encryption_type (str): KMS or NONE
+ key_id (str): KMS key GUID or alias
+        wait (bool): Wait until Stream is ACTIVE.
+            default=True
+        wait_timeout (int): How long to wait until this operation is considered failed.
+            default=300
+        check_mode (bool): If True, skip the API calls and only report what would change.
+            default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> encryption_type = 'KMS'
+        >>> key_id = 'alias/aws'
+        >>> stop_stream_encryption(client, stream_name, encryption_type, key_id)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name, check_mode=check_mode)
+ )
+ if stream_found:
+ if current_stream.get('EncryptionType') == 'KMS':
+ success, err_msg = (
+ stream_encryption_action(
+ client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode
+ )
+ )
+ elif current_stream.get('EncryptionType') == 'NONE':
+ success = True
+
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Stream {0} is in the process of stopping encryption.'.format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Stream {0} does not exist.'.format(stream_name)
+
+ return success, changed, err_msg, results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ shards=dict(default=None, required=False, type='int'),
+ retention_period=dict(default=None, required=False, type='int'),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ wait=dict(default=True, required=False, type='bool'),
+ wait_timeout=dict(default=300, required=False, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ encryption_type=dict(required=False, choices=['NONE', 'KMS']),
+ key_id=dict(required=False, type='str'),
+ encryption_state=dict(required=False, choices=['enabled', 'disabled']),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ retention_period = module.params.get('retention_period')
+ stream_name = module.params.get('name')
+ shards = module.params.get('shards')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ encryption_type = module.params.get('encryption_type')
+ key_id = module.params.get('key_id')
+ encryption_state = module.params.get('encryption_state')
+
+ if state == 'present' and not shards:
+ module.fail_json(msg='Shards is required when state == present.')
+
+ if retention_period:
+ if retention_period < 24:
+ module.fail_json(msg='Retention period can not be less than 24 hours.')
+
+ check_mode = module.check_mode
+ try:
+ client = module.client('kinesis')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ success, changed, err_msg, results = (
+ create_stream(
+ client, stream_name, shards, retention_period, tags,
+ wait, wait_timeout, check_mode
+ )
+ )
+ if encryption_state == 'enabled':
+ success, changed, err_msg, results = (
+ start_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
+ )
+ )
+ elif encryption_state == 'disabled':
+ success, changed, err_msg, results = (
+ stop_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
+ )
+ )
+ elif state == 'absent':
+ success, changed, err_msg, results = (
+ delete_stream(client, stream_name, wait, wait_timeout, check_mode)
+ )
+
+ if success:
+ module.exit_json(
+ success=success, changed=changed, msg=err_msg, **results
+ )
+ else:
+ module.fail_json(
+ success=success, changed=changed, msg=err_msg, result=results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda.py
new file mode 100644
index 00000000..9cb2e028
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lambda
+version_added: 1.0.0
+short_description: Manage AWS Lambda functions
+description:
+ - Allows for the management of Lambda functions.
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name you want to assign to the function you are uploading. Cannot be changed.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete Lambda function.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ runtime:
+ description:
+ - The runtime environment for the Lambda function you are uploading.
+      - Required when I(state=present). Uses parameters as described in the boto3 docs.
+      - For a list of supported runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
+ type: str
+ role:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
+ resources. You may use the bare ARN if the role belongs to the same AWS account.
+ - Required when I(state=present).
+ type: str
+ handler:
+ description:
+ - The function within your code that Lambda calls to begin execution.
+ type: str
+ zip_file:
+ description:
+      - A .zip file containing your deployment package.
+ - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
+ aliases: [ 'src' ]
+ type: str
+ s3_bucket:
+ description:
+ - Amazon S3 bucket name where the .zip file containing your deployment package is stored.
+ - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
+ - I(s3_bucket) and I(s3_key) are required together.
+ type: str
+ s3_key:
+ description:
+ - The Amazon S3 object (the deployment package) key name you want to upload.
+ - I(s3_bucket) and I(s3_key) are required together.
+ type: str
+ s3_object_version:
+ description:
+ - The Amazon S3 object (the deployment package) version you want to upload.
+ type: str
+ description:
+ description:
+ - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
+ type: str
+ timeout:
+ description:
+ - The function maximum execution time in seconds after which Lambda should terminate the function.
+ default: 3
+ type: int
+ memory_size:
+ description:
+ - The amount of memory, in MB, your Lambda function is given.
+ default: 128
+ type: int
+ vpc_subnet_ids:
+ description:
+ - List of subnet IDs to run Lambda function in.
+ - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
+ - If set, I(vpc_security_group_ids) must also be set.
+ type: list
+ elements: str
+ vpc_security_group_ids:
+ description:
+ - List of VPC security group IDs to associate with the Lambda function.
+ - Required when I(vpc_subnet_ids) is used.
+ type: list
+ elements: str
+ environment_variables:
+ description:
+ - A dictionary of environment variables the Lambda function is given.
+ type: dict
+ dead_letter_arn:
+ description:
+ - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
+ type: str
+ tracing_mode:
+ description:
+ - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default.
+ choices: ['Active', 'PassThrough']
+ type: str
+ tags:
+ description:
+ - tag dict to apply to the function (requires botocore 1.5.40 or above).
+ type: dict
+author:
+ - 'Steyn Huizinga (@steynovich)'
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Create Lambda functions
+- name: looped creation
+ community.aws.lambda:
+ name: '{{ item.name }}'
+ state: present
+ zip_file: '{{ item.zip_file }}'
+ runtime: 'python2.7'
+ role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ vpc_subnet_ids:
+ - subnet-123abcde
+ - subnet-edcba321
+ vpc_security_group_ids:
+ - sg-123abcde
+ - sg-edcba321
+ environment_variables: '{{ item.env_vars }}'
+ tags:
+ key1: 'value1'
+ loop:
+ - name: HelloWorld
+ zip_file: hello-code.zip
+ env_vars:
+ key1: "first"
+ key2: "second"
+ - name: ByeBye
+ zip_file: bye-code.zip
+ env_vars:
+ key1: "1"
+ key2: "2"
+
+# To remove previously added tags pass an empty dict
+- name: remove tags
+ community.aws.lambda:
+ name: 'Lambda function'
+ state: present
+ zip_file: 'code.zip'
+ runtime: 'python2.7'
+ role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ tags: {}
+
+# Basic Lambda function deletion
+- name: Delete Lambda functions HelloWorld and ByeBye
+ community.aws.lambda:
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - HelloWorld
+ - ByeBye
+'''
+
+RETURN = r'''
+code:
+ description: the lambda function location returned by get_function in boto3
+ returned: success
+ type: dict
+ sample:
+ {
+ 'location': 'a presigned S3 URL',
+ 'repository_type': 'S3',
+ }
+configuration:
+ description: the lambda function metadata returned by get_function in boto3
+ returned: success
+ type: dict
+ sample:
+ {
+ 'code_sha256': 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=',
+ 'code_size': 123,
+ 'description': 'My function',
+ 'environment': {
+ 'variables': {
+ 'key': 'value'
+ }
+ },
+ 'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
+ 'function_name': 'myFunction',
+ 'handler': 'index.handler',
+ 'last_modified': '2017-08-01T00:00:00.000+0000',
+ 'memory_size': 128,
+ 'revision_id': 'a2x9886d-d48a-4a0c-ab64-82abc005x80c',
+ 'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
+ 'runtime': 'nodejs6.10',
+ 'tracing_config': { 'mode': 'Active' },
+ 'timeout': 3,
+ 'version': '1',
+ 'vpc_config': {
+ 'security_group_ids': [],
+ 'subnet_ids': [],
+ 'vpc_id': '123'
+ }
+ }
+'''
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+import base64
+import hashlib
+import traceback
+import re
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+
+def get_account_info(module):
+ """return the account information (account id and partition) we are currently working on
+
+    get_account_info tries to find out the account that we are working
+ on. It's not guaranteed that this will be easy so we try in
+ several different ways. Giving either IAM or STS privileges to
+ the account should be enough to permit this.
+ """
+ account_id = None
+ partition = None
+ try:
+ sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+ caller_id = sts_client.get_caller_identity(aws_retry=True)
+ account_id = caller_id.get('Account')
+ partition = caller_id.get('Arn').split(':')[1]
+ except (BotoCoreError, ClientError):
+ try:
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
+ except is_boto3_error_code('AccessDenied') as e:
+ try:
+ except_msg = to_native(e.message)
+ except AttributeError:
+ except_msg = to_native(e)
+ m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
+ if m is None:
+ module.fail_json_aws(e, msg="getting account information")
+ account_id = m.group(4)
+ partition = m.group(1)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="getting account information")
+
+ return account_id, partition
+
+
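+# Illustrative sketch (editorial addition, not used by the module): the ARN
+# anatomy get_account_info() relies on, using a hypothetical user ARN.
+def _sketch_parse_arn(arn='arn:aws:iam::123456789012:user/example'):
+    """Return (partition, account_id) from a colon-delimited ARN."""
+    # arn : partition : service : region : account-id : resource
+    parts = arn.split(':')
+    return parts[1], parts[4]
+
+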
+def get_current_function(connection, function_name, qualifier=None):
+ try:
+ if qualifier is not None:
+ return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True)
+ return connection.get_function(FunctionName=function_name, aws_retry=True)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+
+def sha256sum(filename):
+    """Return the base64-encoded SHA256 digest of a file, matching the
+    format of the CodeSha256 field returned by the Lambda API."""
+    hasher = hashlib.sha256()
+    with open(filename, 'rb') as f:
+        hasher.update(f.read())
+
+    code_hash = hasher.digest()
+    code_b64 = base64.b64encode(code_hash)
+    digest = code_b64.decode('utf-8')
+
+    return digest
+
+
+def set_tag(client, module, tags, function):
+
+ changed = False
+ arn = function['Configuration']['FunctionArn']
+
+ try:
+ current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list tags")
+
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
+
+ try:
+ if tags_to_remove:
+ client.untag_resource(
+ Resource=arn,
+ TagKeys=tags_to_remove,
+ aws_retry=True
+ )
+ changed = True
+
+ if tags_to_add:
+ client.tag_resource(
+ Resource=arn,
+ Tags=tags_to_add,
+ aws_retry=True
+ )
+ changed = True
+
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn))
+
+ return changed
+
+
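+# Illustrative sketch (editorial addition, not the library implementation):
+# roughly the diff compare_aws_tags() computes for set_tag() above when
+# purge_tags=True, using plain dicts with hypothetical values.
+def _sketch_compare_tags(current_tags, new_tags):
+    """Return (tags_to_add, tag_keys_to_remove) for a purging tag update."""
+    tags_to_add = dict((k, v) for k, v in new_tags.items() if current_tags.get(k) != v)
+    tags_to_remove = [k for k in current_tags if k not in new_tags]
+    return tags_to_add, tags_to_remove
+
+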
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ runtime=dict(),
+ role=dict(),
+ handler=dict(),
+ zip_file=dict(aliases=['src']),
+ s3_bucket=dict(),
+ s3_key=dict(),
+ s3_object_version=dict(),
+ description=dict(default=''),
+ timeout=dict(type='int', default=3),
+ memory_size=dict(type='int', default=128),
+ vpc_subnet_ids=dict(type='list', elements='str'),
+ vpc_security_group_ids=dict(type='list', elements='str'),
+ environment_variables=dict(type='dict'),
+ dead_letter_arn=dict(),
+ tracing_mode=dict(choices=['Active', 'PassThrough']),
+ tags=dict(type='dict'),
+ )
+
+ mutually_exclusive = [['zip_file', 's3_key'],
+ ['zip_file', 's3_bucket'],
+ ['zip_file', 's3_object_version']]
+
+ required_together = [['s3_key', 's3_bucket'],
+ ['vpc_subnet_ids', 'vpc_security_group_ids']]
+
+ required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_if=required_if)
+
+ name = module.params.get('name')
+ state = module.params.get('state').lower()
+ runtime = module.params.get('runtime')
+ role = module.params.get('role')
+ handler = module.params.get('handler')
+ s3_bucket = module.params.get('s3_bucket')
+ s3_key = module.params.get('s3_key')
+ s3_object_version = module.params.get('s3_object_version')
+ zip_file = module.params.get('zip_file')
+ description = module.params.get('description')
+ timeout = module.params.get('timeout')
+ memory_size = module.params.get('memory_size')
+ vpc_subnet_ids = module.params.get('vpc_subnet_ids')
+ vpc_security_group_ids = module.params.get('vpc_security_group_ids')
+ environment_variables = module.params.get('environment_variables')
+ dead_letter_arn = module.params.get('dead_letter_arn')
+ tracing_mode = module.params.get('tracing_mode')
+ tags = module.params.get('tags')
+
+ check_mode = module.check_mode
+ changed = False
+
+ try:
+ client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Trying to connect to AWS")
+
+ if tags is not None:
+ if not hasattr(client, "list_tags"):
+ module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
+
+ if state == 'present':
+ if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
+ role_arn = role
+ else:
+ # get account ID and assemble ARN
+ account_id, partition = get_account_info(module)
+ role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)
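+            # e.g. (illustrative): role 'lambda_basic_execution' in account
+            # 123456789012 on the 'aws' partition becomes
+            # 'arn:aws:iam::123456789012:role/lambda_basic_execution'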
+
+ # Get function configuration if present, False otherwise
+ current_function = get_current_function(client, name)
+
+ # Update existing Lambda function
+ if state == 'present' and current_function:
+
+ # Get current state
+ current_config = current_function['Configuration']
+ current_version = None
+
+ # Update function configuration
+ func_kwargs = {'FunctionName': name}
+
+ # Update configuration if needed
+ if role_arn and current_config['Role'] != role_arn:
+ func_kwargs.update({'Role': role_arn})
+ if handler and current_config['Handler'] != handler:
+ func_kwargs.update({'Handler': handler})
+ if description and current_config['Description'] != description:
+ func_kwargs.update({'Description': description})
+ if timeout and current_config['Timeout'] != timeout:
+ func_kwargs.update({'Timeout': timeout})
+ if memory_size and current_config['MemorySize'] != memory_size:
+ func_kwargs.update({'MemorySize': memory_size})
+ if runtime and current_config['Runtime'] != runtime:
+ func_kwargs.update({'Runtime': runtime})
+ if (environment_variables is not None) and (current_config.get(
+ 'Environment', {}).get('Variables', {}) != environment_variables):
+ func_kwargs.update({'Environment': {'Variables': environment_variables}})
+ if dead_letter_arn is not None:
+ if current_config.get('DeadLetterConfig'):
+ if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+ else:
+ if dead_letter_arn != "":
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+ if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode):
+ func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
+
+ # If VPC configuration is desired
+ if vpc_subnet_ids:
+
+ if 'VpcConfig' in current_config:
+ # Compare VPC config with current config
+ current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
+ current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
+
+ subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
+ vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
+
+ if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
+ new_vpc_config = {'SubnetIds': vpc_subnet_ids,
+ 'SecurityGroupIds': vpc_security_group_ids}
+ func_kwargs.update({'VpcConfig': new_vpc_config})
+ else:
+ # No VPC configuration is desired, assure VPC config is empty when present in current config
+ if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
+ func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
+
+ # Upload new configuration if configuration has changed
+ if len(func_kwargs) > 1:
+ try:
+ if not check_mode:
+ response = client.update_function_configuration(aws_retry=True, **func_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to update lambda configuration")
+
+ # Update code configuration
+ code_kwargs = {'FunctionName': name, 'Publish': True}
+
+ # Update S3 location
+ if s3_bucket and s3_key:
+ # If function is stored on S3 always update
+ code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
+
+ # If S3 Object Version is given
+ if s3_object_version:
+ code_kwargs.update({'S3ObjectVersion': s3_object_version})
+
+ # Compare local checksum, update remote code when different
+ elif zip_file:
+ local_checksum = sha256sum(zip_file)
+ remote_checksum = current_config['CodeSha256']
+
+ # Only upload new code when local code is different compared to the remote code
+ if local_checksum != remote_checksum:
+ try:
+ with open(zip_file, 'rb') as f:
+ encoded_zip = f.read()
+ code_kwargs.update({'ZipFile': encoded_zip})
+ except IOError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ # Tag Function
+ if tags is not None:
+ if set_tag(client, module, tags, current_function):
+ changed = True
+
+ # Upload new code if needed (e.g. code checksum has changed)
+ if len(code_kwargs) > 2:
+ try:
+ if not check_mode:
+ response = client.update_function_code(aws_retry=True, **code_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to upload new code")
+
+ # Describe function code and configuration
+ response = get_current_function(client, name, qualifier=current_version)
+ if not response:
+ module.fail_json(msg='Unable to get function information after updating')
+
+ # We're done
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+    # Function doesn't exist, create new Lambda function
+ elif state == 'present':
+ if s3_bucket and s3_key:
+ # If function is stored on S3
+ code = {'S3Bucket': s3_bucket,
+ 'S3Key': s3_key}
+ if s3_object_version:
+ code.update({'S3ObjectVersion': s3_object_version})
+ elif zip_file:
+ # If function is stored in local zipfile
+ try:
+ with open(zip_file, 'rb') as f:
+ zip_content = f.read()
+
+ code = {'ZipFile': zip_content}
+ except IOError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ else:
+ module.fail_json(msg='Either S3 object or path to zipfile required')
+
+ func_kwargs = {'FunctionName': name,
+ 'Publish': True,
+ 'Runtime': runtime,
+ 'Role': role_arn,
+ 'Code': code,
+ 'Timeout': timeout,
+ 'MemorySize': memory_size,
+ }
+
+ if description is not None:
+ func_kwargs.update({'Description': description})
+
+ if handler is not None:
+ func_kwargs.update({'Handler': handler})
+
+ if environment_variables:
+ func_kwargs.update({'Environment': {'Variables': environment_variables}})
+
+ if dead_letter_arn:
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+
+ if tracing_mode:
+ func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
+
+ # If VPC configuration is given
+ if vpc_subnet_ids:
+ func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
+ 'SecurityGroupIds': vpc_security_group_ids}})
+
+ # Finally try to create function
+ current_version = None
+ try:
+ if not check_mode:
+ response = client.create_function(aws_retry=True, **func_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to create function")
+
+ # Tag Function
+ if tags is not None:
+ if set_tag(client, module, tags, get_current_function(client, name)):
+ changed = True
+
+ response = get_current_function(client, name, qualifier=current_version)
+ if not response:
+ module.fail_json(msg='Unable to get function information after creating')
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+ # Delete existing Lambda function
+ if state == 'absent' and current_function:
+ try:
+ if not check_mode:
+ client.delete_function(FunctionName=name, aws_retry=True)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to delete Lambda function")
+
+ module.exit_json(changed=changed)
+
+ # Function already absent, do nothing
+ elif state == 'absent':
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_alias.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_alias.py
new file mode 100644
index 00000000..bd547a41
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_alias.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_alias
+version_added: 1.0.0
+short_description: Creates, updates or deletes AWS Lambda function aliases
+description:
+ - This module allows the management of AWS Lambda functions aliases via the Ansible
+ framework. It is idempotent and supports "Check" mode. Use module M(community.aws.lambda) to manage the lambda function
+ itself and M(community.aws.lambda_event) to manage event source mappings.
+
+
+author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
+options:
+ function_name:
+ description:
+      - The name of the Lambda function.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the function alias.
+ required: true
+ aliases: ['alias_name']
+ type: str
+ description:
+ description:
+ - A short, user-defined function alias description.
+ type: str
+ function_version:
+ description:
+ - Version associated with the Lambda function alias.
+ A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
+ aliases: ['version']
+ type: int
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example to create a lambda function and publish a version
+- hosts: localhost
+ gather_facts: no
+ vars:
+ state: present
+ project_folder: /path/to/deployment/package
+ deployment_package: lambda.zip
+ account: 123456789012
+ production_version: 5
+ tasks:
+  - name: AWS Lambda Function
+    community.aws.lambda:
+      state: "{{ state | default('present') }}"
+      name: myLambdaFunction
+      description: lambda function description
+      zip_file: "{{ project_folder }}/{{ deployment_package }}"
+      runtime: python2.7
+      timeout: 5
+      handler: lambda.handler
+      memory_size: 128
+      role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
+
+ - name: Get information
+      community.aws.lambda_info:
+ name: myLambdaFunction
+ register: lambda_info
+ - name: show results
+ ansible.builtin.debug:
+ msg: "{{ lambda_info['lambda_facts'] }}"
+
+# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
+ - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ community.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: Dev
+ description: Development is $LATEST version
+
+# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
+ - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ community.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: QA
+ version: "{{ lambda_info.lambda_facts.Version }}"
+ description: "QA is version {{ lambda_info.lambda_facts.Version }}"
+ when: lambda_info.lambda_facts.Version != "$LATEST"
+
+# The Prod alias will have a fixed version based on a variable
+ - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ community.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: Prod
+ version: "{{ production_version }}"
+ description: "Production is version {{ production_version }}"
+'''
+
+RETURN = '''
+---
+alias_arn:
+ description: Full ARN of the function, including the alias
+ returned: success
+ type: str
+ sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
+description:
+ description: A short description of the alias
+ returned: success
+ type: str
+ sample: The development stage for my hot new app
+function_version:
+ description: The qualifier that the alias refers to
+ returned: success
+ type: str
+ sample: $LATEST
+name:
+ description: The name of the alias assigned
+ returned: success
+ type: str
+ sample: dev
+'''
+
+import re
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+class AWSConnection:
+ """
+ Create the connection object and client objects as required.
+ """
+
+    def __init__(self, ansible_obj, resources, use_boto3=True):
+
+        try:
+            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['lambda']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['lambda'].meta.region_name
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='lambda'):
+ return self.resource_client[resource]
+
+
+def pc(key):
+ """
+    Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in module_params:
+ module_param = module.params.get(param, None)
+ if module_param:
+ api_params[pc(param)] = module_param
+
+ return api_params
+
+
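+# Illustrative sketch (editorial addition, not used by the module): the
+# mapping set_api_params() performs, with hypothetical module params.
+def _sketch_api_params():
+    """Show how snake_case module params become PascalCase API params."""
+    module_params = {'function_name': 'myFunction', 'name': 'Dev'}
+    # -> {'FunctionName': 'myFunction', 'Name': 'Dev'}
+    return dict((pc(key), value) for key, value in module_params.items())
+
+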
+def validate_params(module, aws):
+ """
+ Performs basic parameter validation.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return:
+ """
+
+ function_name = module.params['function_name']
+
+ # validate function name
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
+ if module.params['function_version'] == 0:
+ module.params['function_version'] = '$LATEST'
+ else:
+ module.params['function_version'] = str(module.params['function_version'])
+
+ return
+
+
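+# Illustrative sketch (editorial addition, not used by the module): names
+# accepted and rejected by the validation regex above.
+def _sketch_name_validation():
+    """Return (True, False) for a valid and an invalid function name."""
+    pattern = r'^[\w\-:]+$'
+    return (bool(re.search(pattern, 'my-function:PROD')),
+            bool(re.search(pattern, 'bad name!')))
+
+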
+def get_lambda_alias(module, aws):
+ """
+ Returns the lambda function alias if it exists.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return:
+ """
+
+ client = aws.client('lambda')
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+ # check if alias exists and get facts
+ try:
+ results = client.get_alias(**api_params)
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ results = None
+ else:
+ module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
+
+ return results
+
+
+def lambda_alias(module, aws):
+ """
+ Adds, updates or deletes lambda function aliases.
+
+ :param module: Ansible module reference
+ :param aws: AWS client connection
+ :return dict:
+ """
+ client = aws.client('lambda')
+ results = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ facts = get_lambda_alias(module, aws)
+ if facts:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+
+ # check if alias has changed -- only version and description can change
+ alias_params = ('function_version', 'description')
+ for param in alias_params:
+ if module.params.get(param) != facts.get(pc(param)):
+ changed = True
+ break
+
+ if changed:
+ api_params = set_api_params(module, ('function_name', 'name'))
+ api_params.update(set_api_params(module, alias_params))
+
+ if not module.check_mode:
+ try:
+ results = client.update_alias(**api_params)
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error updating function alias: {0}'.format(e))
+
+ else:
+ # create new function alias
+ api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
+
+ try:
+ if not module.check_mode:
+ results = client.create_alias(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error creating function alias: {0}'.format(e))
+
+ else: # state = 'absent'
+ if current_state == 'present':
+ # delete the function
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+ try:
+ if not module.check_mode:
+ results = client.delete_alias(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error deleting function alias: {0}'.format(e))
+
+ return dict(changed=changed, **dict(results or facts))
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ function_name=dict(required=True),
+ name=dict(required=True, aliases=['alias_name']),
+ function_version=dict(type='int', required=False, default=0, aliases=['version']),
+ description=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[],
+ )
+
+ aws = AWSConnection(module, ['lambda'])
+
+ validate_params(module, aws)
+
+ results = lambda_alias(module, aws)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_event.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_event.py
new file mode 100644
index 00000000..e0009d13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_event.py
@@ -0,0 +1,432 @@
+#!/usr/bin/python
+# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_event
+version_added: 1.0.0
+short_description: Creates, updates or deletes AWS Lambda function event mappings
+description:
+ - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
+ events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
+ AWS Lambda invokes the function.
+ It is idempotent and supports "Check" mode. Use module M(community.aws.lambda) to manage the lambda
+ function itself and M(community.aws.lambda_alias) to manage function aliases.
+
+
+author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
+options:
+ lambda_function_arn:
+ description:
+ - The name or ARN of the lambda function.
+ required: true
+ aliases: ['function_name', 'function_arn']
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ alias:
+ description:
+ - Name of the function alias.
+ - Mutually exclusive with I(version).
+ type: str
+ version:
+ description:
+ - Version of the Lambda function.
+ - Mutually exclusive with I(alias).
+ type: int
+ event_source:
+ description:
+ - Source of the event that triggers the lambda function.
+      - For DynamoDB and Kinesis events, select C(stream).
+      - For SQS queues, select C(sqs).
+ default: stream
+ choices: ['stream', 'sqs']
+ type: str
+ source_params:
+ description:
+ - Sub-parameters required for event source.
+ suboptions:
+ source_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
+ type: str
+ required: true
+ enabled:
+ description:
+                    - Indicates whether AWS Lambda should begin polling or reading from the event source.
+                default: true
+ type: bool
+ batch_size:
+ description:
+ - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
+ default: 100
+ type: int
+ starting_position:
+ description:
+ - The position in the stream where AWS Lambda should start reading.
+ - Required when I(event_source=stream).
+                choices: [TRIM_HORIZON, LATEST]
+ type: str
+ required: true
+ type: dict
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Example that creates a lambda event notification for a DynamoDB stream
+- name: DynamoDB stream event mapping
+ community.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_name: "{{ function_name }}"
+ alias: Dev
+ source_params:
+ source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
+ enabled: True
+ batch_size: 100
+ starting_position: TRIM_HORIZON
+ register: event
+
+- name: Show source event
+ ansible.builtin.debug:
+ var: event.lambda_stream_events
+'''
+
+RETURN = '''
+---
+lambda_stream_events:
+ description: list of dictionaries returned by the API describing stream event mappings
+ returned: success
+ type: list
+'''
+
+import re
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+class AWSConnection:
+ """
+ Create the connection object and client objects as required.
+ """
+
+ def __init__(self, ansible_obj, resources, use_boto3=True):
+
+ try:
+ self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['lambda']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['lambda'].meta.region_name
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+ # set account ID
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='lambda'):
+ return self.resource_client[resource]
+
+
+def pc(key):
+ """
+    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def ordered_obj(obj):
+ """
+ Order object for comparison purposes
+
+ :param obj:
+ :return:
+ """
+
+ if isinstance(obj, dict):
+ return sorted((k, ordered_obj(v)) for k, v in obj.items())
+ if isinstance(obj, list):
+ return sorted(ordered_obj(x) for x in obj)
+ else:
+ return obj
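+
+# Illustrative example (not executed): ordered_obj({'b': 1, 'a': [2, 1]}) and
+# ordered_obj({'a': [1, 2], 'b': 1}) produce the same ordered structure, so
+# structurally equal objects compare equal regardless of key or list ordering.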
+
+
+def set_api_sub_params(params):
+ """
+ Sets module sub-parameters to those expected by the boto3 API.
+
+ :param params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in params.keys():
+ param_value = params.get(param, None)
+ if param_value:
+ api_params[pc(param)] = param_value
+
+ return api_params
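+
+# Illustrative example (not executed): given the sub-parameters
+#   {'batch_size': 100, 'enabled': True, 'starting_position': None}
+# set_api_sub_params returns {'BatchSize': 100, 'Enabled': True}. Falsy values
+# (None, 0, '', False) are dropped, so Enabled=False cannot be expressed
+# through this helper.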
+
+
+def validate_params(module, aws):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ function_name = module.params['lambda_function_arn']
+
+ # validate function name
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
+ module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
+
+ # check if 'function_name' needs to be expanded in full ARN format
+ if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
+ function_name = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
+
+ qualifier = get_qualifier(module)
+ if qualifier:
+ function_arn = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+ return
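+
+# For example (illustrative values): a bare function name 'myFunction' with
+# alias 'Dev' in region us-east-1 for account 123456789012 is expanded to
+#   arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev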
+
+
+def get_qualifier(module):
+ """
+ Returns the function qualifier as a version or alias or None.
+
+ :param module:
+ :return:
+ """
+
+ qualifier = None
+ if module.params['version'] > 0:
+ qualifier = str(module.params['version'])
+ elif module.params['alias']:
+ qualifier = str(module.params['alias'])
+
+ return qualifier
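+
+# For example: version=3 yields qualifier '3', alias='Dev' yields 'Dev', and
+# with neither set the function returns None and the ARN is left unqualified.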
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Lambda Event Handlers
+#
+# This section defines a lambda_event_X function where X is an AWS service capable of initiating
+# the execution of a Lambda function (pull only).
+#
+# ---------------------------------------------------------------------------------------------------
+
+def lambda_event_stream(module, aws):
+ """
+ Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ client = aws.client('lambda')
+ facts = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+
+ # check if required sub-parameters are present and valid
+ source_params = module.params['source_params']
+
+ source_arn = source_params.get('source_arn')
+ if source_arn:
+ api_params.update(EventSourceArn=source_arn)
+ else:
+ module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
+
+ # check if optional sub-parameters are valid, if present
+ batch_size = source_params.get('batch_size')
+ if batch_size:
+ try:
+ source_params['batch_size'] = int(batch_size)
+ except ValueError:
+ module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
+
+ # optional boolean value needs special treatment as not present does not imply False
+ source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
+
+ # check if event mapping exist
+ try:
+ facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
+ if facts:
+ current_state = 'present'
+ except ClientError as e:
+ module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
+
+ if state == 'present':
+ if current_state == 'absent':
+
+ starting_position = source_params.get('starting_position')
+ if starting_position:
+ api_params.update(StartingPosition=starting_position)
+ elif module.params.get('event_source') == 'sqs':
+ # starting position is not required for SQS
+ pass
+ else:
+ module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
+
+ if source_arn:
+ api_params.update(Enabled=source_param_enabled)
+ if source_params.get('batch_size'):
+ api_params.update(BatchSize=source_params.get('batch_size'))
+
+ try:
+ if not module.check_mode:
+ facts = client.create_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
+
+ else:
+ # current_state is 'present'
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+ current_mapping = facts[0]
+ api_params.update(UUID=current_mapping['UUID'])
+ mapping_changed = False
+
+ # check if anything changed
+ if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
+ api_params.update(BatchSize=source_params['batch_size'])
+ mapping_changed = True
+
+ if source_param_enabled is not None:
+ if source_param_enabled:
+ if current_mapping['State'] not in ('Enabled', 'Enabling'):
+ api_params.update(Enabled=True)
+ mapping_changed = True
+ else:
+ if current_mapping['State'] not in ('Disabled', 'Disabling'):
+ api_params.update(Enabled=False)
+ mapping_changed = True
+
+ if mapping_changed:
+ try:
+ if not module.check_mode:
+ facts = client.update_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
+
+ else:
+ if current_state == 'present':
+ # remove the stream event mapping
+ api_params = dict(UUID=facts[0]['UUID'])
+
+ try:
+ if not module.check_mode:
+ facts = client.delete_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
+
+ return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
+
+
+def main():
+ """Produce a list of function suffixes which handle lambda events."""
+ source_choices = ["stream", "sqs"]
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']),
+ event_source=dict(required=False, default="stream", choices=source_choices),
+ source_params=dict(type='dict', required=True),
+ alias=dict(required=False, default=None),
+ version=dict(type='int', required=False, default=0),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['alias', 'version']],
+ required_together=[],
+ )
+
+ aws = AWSConnection(module, ['lambda'])
+
+ validate_params(module, aws)
+
+ if module.params['event_source'].lower() in ('stream', 'sqs'):
+ results = lambda_event_stream(module, aws)
+ else:
+ module.fail_json(msg='Please select `stream` or `sqs` as the event type')
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_facts.py
new file mode 100644
index 00000000..4c02947c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_facts.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_facts
+version_added: 1.0.0
+deprecated:
+ removed_at_date: '2021-12-01'
+ removed_from_collection: 'community.aws'
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.aws.lambda_info) instead.
+short_description: Gathers AWS Lambda function details as Ansible facts
+description:
+ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
+ Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases and
+ M(community.aws.lambda_event) to manage lambda event source mappings.
+
+
+options:
+ query:
+ description:
+ - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
+ choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
+ default: "all"
+ type: str
+ function_name:
+ description:
+ - The name of the lambda function for which facts are requested.
+ aliases: [ "function", "name"]
+ type: str
+ event_source_arn:
+ description:
+      - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
+ type: str
+author: Pierre Jodouin (@pjodouin)
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example of listing all info for a function
+- name: List all for a specific function
+ community.aws.lambda_facts:
+ query: all
+ function_name: myFunction
+ register: my_function_details
+
+# List all versions of a function
+- name: List function versions
+ community.aws.lambda_facts:
+ query: versions
+ function_name: myFunction
+ register: my_function_versions
+
+# List all lambda functions
+- name: List all functions
+ community.aws.lambda_facts:
+ query: all
+ max_items: 20
+- name: show Lambda facts
+ ansible.builtin.debug:
+ var: lambda_facts
+'''
+
+RETURN = '''
+---
+lambda_facts:
+ description: lambda facts
+ returned: success
+ type: dict
+lambda_facts.function:
+ description: lambda function list
+ returned: success
+ type: dict
+lambda_facts.function.TheName:
+ description: lambda function information, including event, mapping, and version information
+ returned: success
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+import json
+import datetime
+import sys
+import re
+
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def fix_return(node):
+ """
+ fixup returned dictionary
+
+ :param node:
+ :return:
+ """
+
+ if isinstance(node, datetime.datetime):
+ node_value = str(node)
+
+ elif isinstance(node, list):
+ node_value = [fix_return(item) for item in node]
+
+ elif isinstance(node, dict):
+ node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
+
+ else:
+ node_value = node
+
+ return node_value
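+
+# Illustrative example: a datetime.datetime(2017, 3, 27, 8, 38) value nested
+# anywhere in the returned structure is replaced by its str() form,
+# '2017-03-27 08:38:00', since raw datetime objects are not JSON serializable.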
+
+
+def alias_details(client, module):
+ """
+ Returns list of aliases for a specified function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+ try:
+ lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(aliases=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get aliases")
+ else:
+ module.fail_json(msg='Parameter function_name required for query=aliases.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def all_details(client, module):
+ """
+ Returns all lambda related facts.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker for query=all.')
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ lambda_facts[function_name] = {}
+ lambda_facts[function_name].update(config_details(client, module)[function_name])
+ lambda_facts[function_name].update(alias_details(client, module)[function_name])
+ lambda_facts[function_name].update(policy_details(client, module)[function_name])
+ lambda_facts[function_name].update(version_details(client, module)[function_name])
+ lambda_facts[function_name].update(mapping_details(client, module)[function_name])
+ else:
+ lambda_facts.update(config_details(client, module))
+
+ return lambda_facts
+
+
+def config_details(client, module):
+ """
+ Returns configuration details for one or all lambda functions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(function={})
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
+ else:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(function_list=client.list_functions(**params)['Functions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(function_list=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get function list")
+
+ functions = dict()
+ for func in lambda_facts.pop('function_list', []):
+ functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
+ return functions
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def mapping_details(client, module):
+ """
+ Returns all lambda event source mappings.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+ params = dict()
+ function_name = module.params.get('function_name')
+
+ if function_name:
+ params['FunctionName'] = module.params.get('function_name')
+
+ if module.params.get('event_source_arn'):
+ params['EventSourceArn'] = module.params.get('event_source_arn')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(mappings=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get source event mappings")
+
+ if function_name:
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+ return camel_dict_to_snake_dict(lambda_facts)
+
+
+def policy_details(client, module):
+ """
+ Returns policy attached to a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker for query=policy.')
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(policy={})
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=policy.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def version_details(client, module):
+ """
+ Returns all lambda function versions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_facts = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_facts.update(versions=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=versions.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+    argument_spec = dict(
+        function_name=dict(required=False, default=None, aliases=['function', 'name']),
+        query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
+        event_source_arn=dict(required=False, default=None),
+        # max_items and next_marker are read throughout this module and used
+        # in the documented examples, so declare them in the argument spec
+        max_items=dict(type='int'),
+        next_marker=dict(type='str'),
+    )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[]
+ )
+
+ # validate function_name if present
+ function_name = module.params['function_name']
+ if function_name:
+ if not re.search(r"^[\w\-:]+$", function_name):
+ module.fail_json(
+                msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ client = module.client('lambda')
+
+ this_module = sys.modules[__name__]
+
+ invocations = dict(
+ aliases='alias_details',
+ all='all_details',
+ config='config_details',
+ mappings='mapping_details',
+ policy='policy_details',
+ versions='version_details',
+ )
+
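+    # For example, query='aliases' resolves to alias_details above, which is
+    # then invoked as alias_details(client, module).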
+ this_module_function = getattr(this_module, invocations[module.params['query']])
+ all_facts = fix_return(this_module_function(client, module))
+
+ results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)
+
+ if module.check_mode:
+ results['msg'] = 'Check mode set but ignored for fact gathering only.'
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_info.py
new file mode 100644
index 00000000..1e40aec4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_info.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_info
+version_added: 1.0.0
+short_description: Gathers AWS Lambda function details
+description:
+ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
+ - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases and
+ M(community.aws.lambda_event) to manage lambda event source mappings.
+
+
+options:
+ query:
+ description:
+ - Specifies the resource type for which to gather information. Leave blank to retrieve all information.
+ choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
+ default: "all"
+ type: str
+ function_name:
+ description:
+ - The name of the lambda function for which information is requested.
+ aliases: [ "function", "name"]
+ type: str
+ event_source_arn:
+ description:
+ - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
+ type: str
+author: Pierre Jodouin (@pjodouin)
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example of listing all info for a function
+- name: List all for a specific function
+ community.aws.lambda_info:
+ query: all
+ function_name: myFunction
+ register: my_function_details
+# List all versions of a function
+- name: List function versions
+ community.aws.lambda_info:
+ query: versions
+ function_name: myFunction
+ register: my_function_versions
+# List all lambda functions
+- name: List all functions
+ community.aws.lambda_info:
+ query: all
+ max_items: 20
+ register: output
+- name: show Lambda information
+ ansible.builtin.debug:
+ msg: "{{ output['function'] }}"
+'''
+
+RETURN = '''
+---
+function:
+ description: lambda function list
+ returned: success
+ type: dict
+function.TheName:
+ description: lambda function information, including event, mapping, and version information
+ returned: success
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+import json
+import datetime
+import re
+
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def fix_return(node):
+ """
+ fixup returned dictionary
+
+ :param node:
+ :return:
+ """
+
+ if isinstance(node, datetime.datetime):
+ node_value = str(node)
+
+ elif isinstance(node, list):
+ node_value = [fix_return(item) for item in node]
+
+ elif isinstance(node, dict):
+ node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
+
+ else:
+ node_value = node
+
+ return node_value
+
+
+def alias_details(client, module):
+ """
+ Returns list of aliases for a specified function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+ try:
+ lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(aliases=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get aliases")
+ else:
+ module.fail_json(msg='Parameter function_name required for query=aliases.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_info)}
+
+
+def all_details(client, module):
+ """
+ Returns all lambda related facts.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker for query=all.')
+
+ lambda_info = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ lambda_info[function_name] = {}
+ lambda_info[function_name].update(config_details(client, module)[function_name])
+ lambda_info[function_name].update(alias_details(client, module)[function_name])
+ lambda_info[function_name].update(policy_details(client, module)[function_name])
+ lambda_info[function_name].update(version_details(client, module)[function_name])
+ lambda_info[function_name].update(mapping_details(client, module)[function_name])
+ else:
+ lambda_info.update(config_details(client, module))
+
+ return lambda_info
+
+
+def config_details(client, module):
+ """
+ Returns configuration details for one or all lambda functions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ lambda_info.update(client.get_function_configuration(FunctionName=function_name))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(function={})
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
+ else:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_info.update(function_list=client.list_functions(**params)['Functions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(function_list=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get function list")
+
+ functions = dict()
+ for func in lambda_info.pop('function_list', []):
+ functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
+ return functions
+
+ return {function_name: camel_dict_to_snake_dict(lambda_info)}
+
+
+def mapping_details(client, module):
+ """
+ Returns all lambda event source mappings.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_info = dict()
+ params = dict()
+ function_name = module.params.get('function_name')
+
+ if function_name:
+ params['FunctionName'] = module.params.get('function_name')
+
+ if module.params.get('event_source_arn'):
+ params['EventSourceArn'] = module.params.get('event_source_arn')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(mappings=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get source event mappings")
+
+ if function_name:
+ return {function_name: camel_dict_to_snake_dict(lambda_info)}
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def policy_details(client, module):
+ """
+ Returns policy attached to a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items or next_marker for query=policy.')
+
+ lambda_info = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ try:
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(policy={})
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=policy.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_info)}
+
+
+def version_details(client, module):
+ """
+ Returns all lambda function versions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ params = dict()
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ try:
+ lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ lambda_info.update(versions=[])
+ else:
+ module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
+ else:
+ module.fail_json(msg='Parameter function_name required for query=versions.')
+
+ return {function_name: camel_dict_to_snake_dict(lambda_info)}
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+    argument_spec = dict(
+        function_name=dict(required=False, default=None, aliases=['function', 'name']),
+        query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
+        event_source_arn=dict(required=False, default=None),
+        # max_items and next_marker are read throughout this module and used
+        # in the documented examples, so declare them in the argument spec
+        max_items=dict(type='int'),
+        next_marker=dict(type='str'),
+    )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[]
+ )
+
+ # validate function_name if present
+ function_name = module.params['function_name']
+ if function_name:
+ if not re.search(r"^[\w\-:]+$", function_name):
+ module.fail_json(
+                msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ client = module.client('lambda')
+
+ invocations = dict(
+ aliases='alias_details',
+ all='all_details',
+ config='config_details',
+ mappings='mapping_details',
+ policy='policy_details',
+ versions='version_details',
+ )
+
+ this_module_function = globals()[invocations[module.params['query']]]
+ all_facts = fix_return(this_module_function(client, module))
+
+ results = dict(function=all_facts, changed=False)
+
+ if module.check_mode:
+ results['msg'] = 'Check mode set but ignored for fact gathering only.'
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_policy.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_policy.py
new file mode 100644
index 00000000..2fb4b4dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lambda_policy.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_policy
+version_added: 1.0.0
+short_description: Creates, updates or deletes AWS Lambda policy statements
+description:
+ - This module allows the management of AWS Lambda policy statements.
+ - It is idempotent and supports "Check" mode.
+ - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases,
+ M(community.aws.lambda_event) to manage event source mappings such as Kinesis streams, M(community.aws.execute_lambda) to execute a
+ lambda function and M(community.aws.lambda_info) to gather information relating to one or more lambda functions.
+
+
+author:
+ - Pierre Jodouin (@pjodouin)
+ - Michael De La Rue (@mikedlr)
+options:
+ function_name:
+ description:
+ - "Name of the Lambda function whose resource policy you are updating by adding a new permission."
+ - "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
+ - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail) ). AWS Lambda also allows you to"
+ - "specify partial ARN (for example, C(account-id:Thumbnail) ). Note that the length constraint applies only to the"
+ - "ARN. If you specify only the function name, it is limited to 64 character in length."
+ required: true
+ aliases: ['lambda_function_arn', 'function_arn']
+ type: str
+
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+
+ alias:
+ description:
+ - Name of the function alias. Mutually exclusive with I(version).
+ type: str
+
+ version:
+ description:
+ - Version of the Lambda function. Mutually exclusive with I(alias).
+ type: int
+
+ statement_id:
+ description:
+ - A unique statement identifier.
+ required: true
+ aliases: ['sid']
+ type: str
+
+ action:
+ description:
+ - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
+ lambda: followed by the API name (see Operations ). For example, C(lambda:CreateFunction) . You can use wildcard
+ (C(lambda:*)) to grant permission for all AWS Lambda actions."
+ required: true
+ type: str
+
+ principal:
+ description:
+ - "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
+ you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
+ any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom
+ application in another AWS account to push events to AWS Lambda by invoking your function."
+ required: true
+ type: str
+
+ source_arn:
+ description:
+ - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
+ field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
+ the specified bucket can invoke the function.
+ type: str
+
+ source_account:
+ description:
+ - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket,
+ then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
+ specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
+ account created the bucket). You can also use this condition to specify all sources (that is, you don't
+        specify the I(source_arn)) owned by a specific account.
+ type: str
+
+ event_source_token:
+ description:
+ - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account).
+ type: str
+
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+
+- name: Lambda S3 event notification
+ community.aws.lambda_policy:
+ state: present
+ function_name: functionName
+ alias: Dev
+ statement_id: lambda-s3-myBucket-create-data-log
+ action: lambda:InvokeFunction
+ principal: s3.amazonaws.com
+ source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName
+ source_account: 123456789012
+ register: lambda_policy_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: lambda_policy_action
+'''
+
+RETURN = '''
+---
+lambda_policy_action:
+ description: describes what action was taken
+ returned: success
+ type: str
+'''
+
+import json
+import re
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def pc(key):
+ """
+ Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def policy_equal(module, current_statement):
+ for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
+ if module.params.get(param) != current_statement.get(param):
+ return False
+
+ return True
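+
+# Note: only the five statement-level fields above are compared; the statement
+# ID itself has already been matched by extract_statement, so an existing
+# statement with the same Sid but, say, a different principal compares unequal
+# and triggers a remove/re-add cycle in manage_state.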
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in module_params:
+ module_param = module.params.get(param)
+ if module_param is not None:
+ api_params[pc(param)] = module_param
+
+ return api_params
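+
+# Illustrative example (not executed): with module parameters
+# function_name='myFunction' and statement_id='sid-1',
+# set_api_params(module, ('function_name', 'statement_id')) returns
+#   {'FunctionName': 'myFunction', 'StatementId': 'sid-1'}
+# Parameters whose value is None are omitted.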
+
+
+def validate_params(module):
+ """
+ Performs parameter validation beyond the module framework's validation.
+
+ :param module:
+ :return:
+ """
+
+ function_name = module.params['function_name']
+
+ # validate function name
+ if function_name.startswith('arn:'):
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+ msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 140:
+ module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name))
+ else:
+ if not re.search(r'^[\w\-]+$', function_name):
+ module.fail_json(
+ msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
+ function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(
+ msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+
+def get_qualifier(module):
+ """
+ Returns the function qualifier as a version or alias or None.
+
+ :param module:
+ :return:
+ """
+
+ if module.params.get('version') is not None:
+ return to_native(module.params['version'])
+ elif module.params['alias']:
+ return to_native(module.params['alias'])
+
+ return None
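+
+# For example: version=1 yields qualifier '1' and alias='Dev' yields 'Dev'.
+# Unlike the lambda_event module, a version of 0 is still honoured here
+# because the test is 'is not None' rather than '> 0'.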
+
+
+def extract_statement(policy, sid):
+ """return flattened single policy statement from a policy
+
+ If a policy statement is present in the policy extract it and
+ return it in a flattened form. Otherwise return an empty
+ dictionary.
+ """
+ if 'Statement' not in policy:
+ return {}
+ policy_statement = {}
+ # Now that we have the policy, check if required permission statement is present and flatten to
+ # simple dictionary if found.
+ for statement in policy['Statement']:
+ if statement['Sid'] == sid:
+ policy_statement['action'] = statement['Action']
+ try:
+ policy_statement['principal'] = statement['Principal']['Service']
+ except KeyError:
+ pass
+ try:
+ policy_statement['principal'] = statement['Principal']['AWS']
+ except KeyError:
+ pass
+ try:
+ policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
+ except KeyError:
+ pass
+ try:
+ policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
+ except KeyError:
+ pass
+ try:
+ policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
+ except KeyError:
+ pass
+ break
+
+ return policy_statement
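+
+# Illustrative example (not executed): a policy statement such as
+#   {'Sid': 'sid-1', 'Action': 'lambda:InvokeFunction',
+#    'Principal': {'Service': 's3.amazonaws.com'},
+#    'Condition': {'ArnLike': {'AWS:SourceArn': 'arn:aws:s3:::myBucket'}}}
+# is flattened to
+#   {'action': 'lambda:InvokeFunction', 'principal': 's3.amazonaws.com',
+#    'source_arn': 'arn:aws:s3:::myBucket'}
+# which is the shape policy_equal() compares against the module parameters.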
+
+
+def get_policy_statement(module, client):
+ """Checks that policy exists and if so, that statement ID is present or absent.
+
+ :param module:
+ :param client:
+ :return:
+ """
+ sid = module.params['statement_id']
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', ))
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ policy_results = None
+ # check if function policy exists
+ try:
+ policy_results = client.get_policy(**api_params)
+ except ClientError as e:
+ try:
+ if e.response['Error']['Code'] == 'ResourceNotFoundException':
+ return {}
+ except AttributeError: # catches ClientErrors without response, e.g. fail before connect
+ pass
+ module.fail_json_aws(e, msg="retrieving function policy")
+ except Exception as e:
+ module.fail_json_aws(e, msg="retrieving function policy")
+
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ policy = json.loads(policy_results.get('Policy', '{}'))
+ return extract_statement(policy, sid)
+
+
+def add_policy_permission(module, client):
+ """
+ Adds a permission statement to the policy.
+
+ :param module:
+    :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = (
+ 'function_name',
+ 'statement_id',
+ 'action',
+ 'principal',
+ 'source_arn',
+ 'source_account',
+ 'event_source_token')
+ api_params = set_api_params(module, params)
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ if not module.check_mode:
+ try:
+ client.add_permission(**api_params)
+ except Exception as e:
+ module.fail_json_aws(e, msg="adding permission to policy")
+ changed = True
+
+ return changed
+
+
+def remove_policy_permission(module, client):
+ """
+    Removes a permission statement from the policy.
+
+ :param module:
+    :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', 'statement_id'))
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ try:
+ if not module.check_mode:
+ client.remove_permission(**api_params)
+ changed = True
+ except Exception as e:
+ module.fail_json_aws(e, msg="removing permission from policy")
+
+ return changed
+
+
+def manage_state(module, lambda_client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ action_taken = 'none'
+
+ # check if the policy exists
+ current_policy_statement = get_policy_statement(module, lambda_client)
+ if current_policy_statement:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present' and not policy_equal(module, current_policy_statement):
+ remove_policy_permission(module, lambda_client)
+ changed = add_policy_permission(module, lambda_client)
+ action_taken = 'updated'
+        if current_state != 'present':
+ changed = add_policy_permission(module, lambda_client)
+ action_taken = 'added'
+ elif current_state == 'present':
+ # remove the policy statement
+ changed = remove_policy_permission(module, lambda_client)
+ action_taken = 'deleted'
+
+ return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
+
+
+def setup_module_object():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
+ statement_id=dict(required=True, aliases=['sid']),
+ alias=dict(),
+ version=dict(type='int'),
+ action=dict(required=True, ),
+ principal=dict(required=True, ),
+ source_arn=dict(),
+ source_account=dict(),
+ event_source_token=dict(),
+ )
+
+ return AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['alias', 'version'],
+ ['event_source_token', 'source_arn'],
+ ['event_source_token', 'source_account']],
+ )
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+
+ module = setup_module_object()
+ client = module.client('lambda')
+ validate_params(module)
+ results = manage_state(module, client)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lightsail.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lightsail.py
new file mode 100644
index 00000000..4be2fc3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/lightsail.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lightsail
+version_added: 1.0.0
+short_description: Manage instances in AWS Lightsail
+description:
+ - Manage instances in AWS Lightsail.
+ - Instance tagging is not yet supported in this module.
+author:
+ - "Nick Ball (@nickball)"
+ - "Prasad Katti (@prasadkatti)"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ - I(rebooted) and I(restarted) are aliases.
+ default: present
+ choices: ['present', 'absent', 'running', 'restarted', 'rebooted', 'stopped']
+ type: str
+ name:
+ description: Name of the instance.
+ required: true
+ type: str
+ zone:
+ description:
+ - AWS availability zone in which to launch the instance.
+      - Required when I(state=present).
+ type: str
+ blueprint_id:
+ description:
+ - ID of the instance blueprint image.
+      - Required when I(state=present).
+ type: str
+ bundle_id:
+ description:
+ - Bundle of specification info for the instance.
+ - Required when I(state=present).
+ type: str
+ user_data:
+ description:
+ - Launch script that can configure the instance with additional data.
+ type: str
+ key_pair_name:
+ description:
+ - Name of the key pair to use with the instance.
+ - If I(state=present) and a key_pair_name is not provided, the default keypair from the region will be used.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ - If I(wait=false) an ip_address may not be returned.
+ - Has no effect when I(state=rebooted) or I(state=absent).
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - How long before I(wait) gives up, in seconds.
+ default: 300
+ type: int
+
+requirements:
+ - boto3
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+
+EXAMPLES = '''
+- name: Create a new Lightsail instance
+ community.aws.lightsail:
+ state: present
+ name: my_instance
+ region: us-east-1
+ zone: us-east-1a
+ blueprint_id: ubuntu_16_04
+ bundle_id: nano_1_0
+ key_pair_name: id_rsa
+ user_data: " echo 'hello world' > /home/ubuntu/test.txt"
+ register: my_instance
+
+- name: Delete an instance
+ community.aws.lightsail:
+ state: absent
+ region: us-east-1
+ name: my_instance
+
+'''
+
+RETURN = '''
+changed:
+  description: whether the instance has been created, modified or deleted
+ returned: always
+ type: bool
+ sample:
+ changed: true
+instance:
+ description: instance data
+ returned: always
+ type: dict
+ sample:
+ arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
+ blueprint_id: "ubuntu_16_04"
+ blueprint_name: "Ubuntu"
+ bundle_id: "nano_1_0"
+ created_at: "2017-03-27T08:38:59.714000-04:00"
+ hardware:
+ cpu_count: 1
+ ram_size_in_gb: 0.5
+ is_static_ip: false
+ location:
+ availability_zone: "us-east-1a"
+ region_name: "us-east-1"
+ name: "my_instance"
+ networking:
+ monthly_transfer:
+ gb_per_month_allocated: 1024
+ ports:
+ - access_direction: "inbound"
+ access_from: "Anywhere (0.0.0.0/0)"
+ access_type: "public"
+ common_name: ""
+ from_port: 80
+ protocol: tcp
+ to_port: 80
+ - access_direction: "inbound"
+ access_from: "Anywhere (0.0.0.0/0)"
+ access_type: "public"
+ common_name: ""
+ from_port: 22
+ protocol: tcp
+ to_port: 22
+ private_ip_address: "172.26.8.14"
+ public_ip_address: "34.207.152.202"
+ resource_type: "Instance"
+ ssh_key_name: "keypair"
+ state:
+ code: 16
+ name: running
+ support_code: "588307843083/i-0997c97831ee21e33"
+ username: "ubuntu"
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ # will be caught by AnsibleAWSModule
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def find_instance_info(module, client, instance_name, fail_if_not_found=False):
+
+ try:
+ res = client.get_instance(instanceName=instance_name)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NotFoundException' and not fail_if_not_found:
+ return None
+ module.fail_json_aws(e)
+ return res['instance']
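+
+# For example, find_instance_info(module, client, 'my_instance') returns the
+# instance dict from the Lightsail get_instance API, or None when the instance
+# does not exist and fail_if_not_found is False.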
+
+
+def wait_for_instance_state(module, client, instance_name, states):
+ """
+ `states` is a list of instance states that we are waiting for.
+ """
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_max = time.time() + wait_timeout
+ while wait_max > time.time():
+ try:
+ instance = find_instance_info(module, client, instance_name)
+ if instance['state']['name'] in states:
+ break
+ time.sleep(5)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ else:
+ module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -'
+ ' {1}'.format(instance_name, states))
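+
+# For example, wait_for_instance_state(module, client, 'my_instance',
+# ['running', 'stopped']) polls get_instance every 5 seconds until the
+# instance leaves any transitional state or wait_timeout seconds elapse.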
+
+
+def create_instance(module, client, instance_name):
+
+ inst = find_instance_info(module, client, instance_name)
+ if inst:
+ module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst))
+ else:
+ create_params = {'instanceNames': [instance_name],
+ 'availabilityZone': module.params.get('zone'),
+ 'blueprintId': module.params.get('blueprint_id'),
+ 'bundleId': module.params.get('bundle_id'),
+ 'userData': module.params.get('user_data')}
+
+ key_pair_name = module.params.get('key_pair_name')
+ if key_pair_name:
+ create_params['keyPairName'] = key_pair_name
+
+ try:
+ client.create_instances(**create_params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ wait = module.params.get('wait')
+ if wait:
+ desired_states = ['running']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst))
+
+
+def delete_instance(module, client, instance_name):
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name)
+ if inst is None:
+ module.exit_json(changed=changed, instance={})
+
+ # Wait for instance to exit transition state before deleting
+ desired_states = ['running', 'stopped']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+
+ try:
+ client.delete_instance(instanceName=instance_name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def restart_instance(module, client, instance_name):
+ """
+ Reboot an existing instance
+ Wait will not apply here as this is an OS-level operation
+ """
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ try:
+ client.reboot_instance(instanceName=instance_name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def start_or_stop_instance(module, client, instance_name, state):
+ """
+ Start or stop an existing instance
+ """
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ # Wait for instance to exit transition state before state change
+ desired_states = ['running', 'stopped']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+
+ # Try state change
+ if inst and inst['state']['name'] != state:
+ try:
+ if state == 'running':
+ client.start_instance(instanceName=instance_name)
+ else:
+ client.stop_instance(instanceName=instance_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ changed = True
+ # Grab current instance info
+ inst = find_instance_info(module, client, instance_name)
+
+ wait = module.params.get('wait')
+ if wait:
+ desired_states = [state]
+ wait_for_instance_state(module, client, instance_name, desired_states)
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted',
+ 'rebooted']),
+ zone=dict(type='str'),
+ blueprint_id=dict(type='str'),
+ bundle_id=dict(type='str'),
+ key_pair_name=dict(type='str'),
+ user_data=dict(type='str', default=''),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=300, type='int'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]])
+
+ client = module.client('lightsail')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_instance(module, client, name)
+ elif state == 'absent':
+ delete_instance(module, client, name)
+ elif state in ('running', 'stopped'):
+ start_or_stop_instance(module, client, name, state)
+ elif state in ('restarted', 'rebooted'):
+ restart_instance(module, client, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds.py
new file mode 100644
index 00000000..13211864
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds.py
@@ -0,0 +1,1396 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds
+version_added: 1.0.0
+short_description: create, delete, or modify Amazon RDS instances, RDS snapshots, and related facts
+description:
+ - Creates, deletes, or modifies rds resources.
+ - When creating an instance it can be either a new instance or a read-only replica of an existing instance.
+ - This module has a dependency on python-boto >= 2.5 and will soon be deprecated.
+ - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0).
+ - Please use boto3 based M(community.aws.rds_instance) instead.
+options:
+ command:
+ description:
+ - Specifies the action to take. The 'reboot' option is available starting at version 2.0.
+ required: true
+ choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
+ type: str
+ instance_name:
+ description:
+ - Database instance identifier.
+ - Required except when using I(command=facts) or I(command=delete) on just a snapshot.
+ type: str
+ source_instance:
+ description:
+ - Name of the database to replicate.
+ - Used only when I(command=replicate).
+ type: str
+ db_engine:
+ description:
+ - The type of database.
+ - Used only when I(command=create).
+ - mariadb was added in version 2.2.
+ choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
+ 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
+ type: str
+ size:
+ description:
+ - Size in gigabytes of the initial storage for the DB instance.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ instance_type:
+ description:
+ - The instance type of the database.
+ - If not specified then the replica inherits the same instance type as the source instance.
+ - Required when I(command=create).
+ - Optional when I(command=replicate), I(command=modify) or I(command=restore).
+ aliases: ['type']
+ type: str
+ username:
+ description:
+ - Master database username.
+ - Used only when I(command=create).
+ type: str
+ password:
+ description:
+ - Password for the master database username.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ db_name:
+ description:
+ - Name of a database to create within the instance.
+ - If not specified then no database is created.
+ - Used only when I(command=create).
+ type: str
+ engine_version:
+ description:
+ - Version number of the database engine to use.
+ - If not specified then the current Amazon RDS default engine version is used.
+ - Used only when I(command=create).
+ type: str
+ parameter_group:
+ description:
+ - Name of the DB parameter group to associate with this instance.
+ - If omitted then the RDS default DBParameterGroup will be used.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ license_model:
+ description:
+ - The license model for this DB instance.
+ - Used only when I(command=create) or I(command=restore).
+ choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
+ type: str
+ multi_zone:
+ description:
+ - Specifies if this is a Multi-availability-zone deployment.
+ - Cannot be used in conjunction with the I(zone) parameter.
+ - Used only when I(command=create) or I(command=modify).
+ type: bool
+ iops:
+ description:
+ - Specifies the number of IOPS for the instance.
+ - Used only when I(command=create) or I(command=modify).
+ - Must be an integer greater than 1000.
+ type: str
+ security_groups:
+ description:
+ - Comma separated list of one or more security groups.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ vpc_security_groups:
+ description:
+ - Comma separated list of one or more vpc security group ids.
+ - Also requires I(subnet) to be specified.
+ - Used only when I(command=create) or I(command=modify).
+ type: list
+ elements: str
+ port:
+ description:
+ - Port number that the DB instance uses for connections.
+ - Used only when I(command=create) or I(command=replicate).
+ - 'Defaults to the standard ports for each I(db_engine): C(3306) for MySQL and MariaDB, C(1521) for Oracle,
+ C(1433) for SQL Server, C(5432) for PostgreSQL.'
+ type: int
+ upgrade:
+ description:
+ - Indicates that minor version upgrades should be applied automatically.
+ - Used only when I(command=create) or I(command=modify) or I(command=restore) or I(command=replicate).
+ type: bool
+ default: false
+ option_group:
+ description:
+ - The name of the option group to use.
+ - If not specified then the default option group is used.
+ - Used only when I(command=create).
+ type: str
+ maint_window:
+ description:
+ - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
+ - Times are specified in UTC.
+ - If not specified then a random maintenance window is assigned.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ backup_window:
+ description:
+ - 'Backup window in format of C(hh24:mi-hh24:mi). (Example: C(18:00-20:30))'
+ - Times are specified in UTC.
+ - If not specified then a random backup window is assigned.
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ backup_retention:
+ description:
+ - Number of days backups are retained.
+ - Set to 0 to disable backups.
+ - Default is 1 day.
+ - 'Valid range: 0-35.'
+ - Used only when I(command=create) or I(command=modify).
+ type: str
+ zone:
+ description:
+ - Availability Zone in which to launch the instance.
+ - Used only when I(command=create), I(command=replicate) or I(command=restore).
+ - Cannot be used in conjunction with the I(multi_zone) parameter.
+ aliases: ['aws_zone', 'ec2_zone']
+ type: str
+ subnet:
+ description:
+ - VPC subnet group.
+ - If specified then a VPC instance is created.
+ - Used only when I(command=create).
+ type: str
+ snapshot:
+ description:
+ - Name of snapshot to take.
+ - When I(command=delete), if no I(snapshot) name is provided then no snapshot is taken.
+ - When I(command=delete), if no I(instance_name) is provided the snapshot is deleted.
+ - Used with I(command=facts), I(command=delete) or I(command=snapshot).
+ type: str
+ wait:
+ description:
+ - When I(command=create), I(command=replicate), I(command=modify) or I(command=restore), wait for the database to enter the C(available) state.
+ - When I(command=delete), wait for the database to be terminated.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ - Used when I(wait=true).
+ default: 300
+ type: int
+ apply_immediately:
+ description:
+ - When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the
+ next preferred maintenance window.
+ - Used only when I(command=modify).
+ type: bool
+ default: false
+ force_failover:
+ description:
+ - If enabled, the reboot is done using a MultiAZ failover.
+ - Used only when I(command=reboot).
+ type: bool
+ default: false
+ new_instance_name:
+ description:
+ - Name to rename an instance to.
+ - Used only when I(command=modify).
+ type: str
+ character_set_name:
+ description:
+ - Associate the DB instance with a specified character set.
+ - Used with I(command=create).
+ type: str
+ publicly_accessible:
+ description:
+ - Explicitly set whether the resource should be publicly accessible or not.
+ - Used with I(command=create), I(command=replicate).
+ - Requires boto >= 2.26.0
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to apply to a resource.
+ - Used with I(command=create), I(command=replicate), I(command=restore).
+ - Requires boto >= 2.26.0
+ type: dict
+requirements:
+ - "python >= 2.6"
+ - "boto"
+author:
+ - "Bruce Pennypacker (@bpennypacker)"
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
+
+EXAMPLES = r'''
+- name: Basic mysql provisioning example
+ community.aws.rds:
+ command: create
+ instance_name: new-database
+ db_engine: MySQL
+ size: 10
+ instance_type: db.m1.small
+ username: mysql_admin
+ password: 1nsecure
+ tags:
+ Environment: testing
+ Application: cms
+
+- name: Create a read-only replica and wait for it to become available
+ community.aws.rds:
+ command: replicate
+ instance_name: new-database-replica
+ source_instance: new_database
+ wait: yes
+ wait_timeout: 600
+
+- name: Delete an instance, but create a snapshot before doing so
+ community.aws.rds:
+ command: delete
+ instance_name: new-database
+ snapshot: new_database_snapshot
+
+- name: Get facts about an instance
+ community.aws.rds:
+ command: facts
+ instance_name: new-database
+ register: new_database_facts
+
+- name: Rename an instance and wait for the change to take effect
+ community.aws.rds:
+ command: modify
+ instance_name: new-database
+ new_instance_name: renamed-database
+ wait: yes
+
+- name: Reboot an instance and wait for it to become available again
+ community.aws.rds:
+ command: reboot
+ instance_name: database
+ wait: yes
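+
+# A minimal sketch of promoting an existing read replica (such as the one
+# created above) to a standalone instance; the instance name is hypothetical.
+- name: Promote a read replica to a standalone instance
+ community.aws.rds:
+ command: promote
+ instance_name: new-database-replica
+ wait: yes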
+
+# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
+# then modify it to add your security group. Also, display the new endpoint.
+# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
+- community.aws.rds:
+ command: restore
+ snapshot: mypostgres-snapshot
+ instance_name: MyNewInstanceName
+ region: us-west-2
+ zone: us-west-2b
+ subnet: default-vpc-xx441xxx
+ publicly_accessible: yes
+ wait: yes
+ wait_timeout: 600
+ tags:
+ Name: pg1_test_name_tag
+ register: rds
+
+- community.aws.rds:
+ command: modify
+ instance_name: MyNewInstanceName
+ region: us-west-2
+ vpc_security_groups: sg-xxx945xx
+
+- ansible.builtin.debug:
+ msg: "The new db endpoint is {{ rds.instance.endpoint }}"
+'''
+
+RETURN = r'''
+instance:
+ description: the rds instance
+ returned: always
+ type: complex
+ contains:
+ engine:
+ description: the name of the database engine
+ returned: when RDS instance exists
+ type: str
+ sample: "oracle-se"
+ engine_version:
+ description: the version of the database engine
+ returned: when RDS instance exists
+ type: str
+ sample: "11.2.0.4.v6"
+ license_model:
+ description: the license model information
+ returned: when RDS instance exists
+ type: str
+ sample: "bring-your-own-license"
+ character_set_name:
+ description: the name of the character set that this instance is associated with
+ returned: when RDS instance exists
+ type: str
+ sample: "AL32UTF8"
+ allocated_storage:
+ description: the allocated storage size in gigabytes (GB)
+ returned: when RDS instance exists
+ type: str
+ sample: "100"
+ publicly_accessible:
+ description: the accessibility options for the DB instance
+ returned: when RDS instance exists
+ type: bool
+ sample: "true"
+ latest_restorable_time:
+ description: the latest time to which a database can be restored with point-in-time restore
+ returned: when RDS instance exists
+ type: str
+ sample: "1489707802.0"
+ secondary_availability_zone:
+ description: the name of the secondary AZ for a DB instance with multi-AZ support
+ returned: when RDS instance exists and is multi-AZ
+ type: str
+ sample: "eu-west-1b"
+ backup_window:
+ description: the daily time range during which automated backups are created if automated backups are enabled
+ returned: when RDS instance exists and automated backups are enabled
+ type: str
+ sample: "03:00-03:30"
+ auto_minor_version_upgrade:
+ description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
+ returned: when RDS instance exists
+ type: bool
+ sample: "true"
+ read_replica_source_dbinstance_identifier:
+ description: the identifier of the source DB instance if this RDS instance is a read replica
+ returned: when read replica RDS instance exists
+ type: str
+ sample: "null"
+ db_name:
+ description: the name of the database to create when the DB instance is created
+ returned: when RDS instance exists
+ type: str
+ sample: "ASERTG"
+ endpoint:
+ description: the endpoint uri of the database instance
+ returned: when RDS instance exists
+ type: str
+ sample: "my-ansible-database.asdfaosdgih.us-east-1.rds.amazonaws.com"
+ port:
+ description: the listening port of the database instance
+ returned: when RDS instance exists
+ type: int
+ sample: 3306
+ parameter_groups:
+ description: the list of DB parameter groups applied to this RDS instance
+ returned: when RDS instance exists and parameter groups are defined
+ type: complex
+ contains:
+ parameter_apply_status:
+ description: the status of parameter updates
+ returned: when RDS instance exists
+ type: str
+ sample: "in-sync"
+ parameter_group_name:
+ description: the name of the DB parameter group
+ returned: when RDS instance exists
+ type: str
+ sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz"
+ option_groups:
+ description: the list of option group memberships for this RDS instance
+ returned: when RDS instance exists
+ type: complex
+ contains:
+ option_group_name:
+ description: the option group name for this RDS instance
+ returned: when RDS instance exists
+ type: str
+ sample: "default:oracle-se-11-2"
+ status:
+ description: the status of the RDS instance's option group membership
+ returned: when RDS instance exists
+ type: str
+ sample: "in-sync"
+ pending_modified_values:
+ description: a dictionary of changes to the RDS instance that are pending
+ returned: when RDS instance exists
+ type: complex
+ contains:
+ db_instance_class:
+ description: the new DB instance class for this RDS instance that will be applied or is in progress
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ db_instance_identifier:
+ description: the new DB instance identifier for this RDS instance that will be applied or is in progress
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ allocated_storage:
+ description: the new allocated storage size for this RDS instance that will be applied or is in progress
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ backup_retention_period:
+ description: the pending number of days for which automated backups are retained
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ engine_version:
+ description: indicates the database engine version
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ iops:
+ description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ master_user_password:
+ description: the pending or in-progress change of the master credentials for this RDS instance
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ multi_az:
+ description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ port:
+ description: specifies the pending port for this RDS instance
+ returned: when RDS instance exists
+ type: str
+ sample: "null"
+ db_subnet_groups:
+ description: information on the subnet group associated with this RDS instance
+ returned: when RDS instance exists
+ type: complex
+ contains:
+ description:
+ description: the subnet group associated with the DB instance
+ returned: when RDS instance exists
+ type: str
+ sample: "Subnets for the UAT RDS SQL DB Instance"
+ name:
+ description: the name of the DB subnet group
+ returned: when RDS instance exists
+ type: str
+ sample: "samplesubnetgrouprds-j6paiqkxqp4z"
+ status:
+ description: the status of the DB subnet group
+ returned: when RDS instance exists
+ type: str
+ sample: "complete"
+ subnets:
+ description: the list of subnets in the DB subnet group
+ returned: when RDS instance exists
+ type: complex
+ contains:
+ availability_zone:
+ description: subnet availability zone information
+ returned: when RDS instance exists
+ type: complex
+ contains:
+ name:
+ description: availability zone
+ returned: when RDS instance exists
+ type: str
+ sample: "eu-west-1b"
+ provisioned_iops_capable:
+ description: whether provisioned iops are available in AZ subnet
+ returned: when RDS instance exists
+ type: bool
+ sample: "false"
+ identifier:
+ description: the identifier of the subnet
+ returned: when RDS instance exists
+ type: str
+ sample: "subnet-3fdba63e"
+ status:
+ description: the status of the subnet
+ returned: when RDS instance exists
+ type: str
+ sample: "active"
+'''
+
+import time
+
+try:
+ import boto.rds
+ import boto.exception
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+try:
+ import boto.rds2
+ import boto.rds2.exceptions
+ HAS_RDS2 = True
+except ImportError:
+ HAS_RDS2 = False
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
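+# Default listening port for each engine family, used to fill in 'port'
+# when it is not specified for the 'create' command.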
+DEFAULT_PORTS = {
+ 'aurora': 3306,
+ 'mariadb': 3306,
+ 'mysql': 3306,
+ 'oracle': 1521,
+ 'sqlserver': 1433,
+ 'postgres': 5432,
+}
+
+
+class RDSException(Exception):
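+ """Normalize boto.rds and boto.rds2 errors, which expose the message and code differently."""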
+ def __init__(self, exc):
+ if hasattr(exc, 'error_message') and exc.error_message:
+ self.message = exc.error_message
+ self.code = exc.error_code
+ elif hasattr(exc, 'body') and 'Error' in exc.body:
+ self.message = exc.body['Error']['Message']
+ self.code = exc.body['Error']['Code']
+ else:
+ self.message = str(exc)
+ self.code = 'Unknown Error'
+
+
+class RDSConnection:
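+ """Wrapper around the legacy boto.rds API, used when boto.rds2 is unavailable."""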
+ def __init__(self, module, region, **aws_connect_params):
+ try:
+ self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ def get_db_instance(self, instancename):
+ try:
+ return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
+ except boto.exception.BotoServerError:
+ return None
+
+ def get_db_snapshot(self, snapshotid):
+ try:
+ return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
+ except boto.exception.BotoServerError:
+ return None
+
+ def create_db_instance(self, instance_name, size, instance_class, db_engine,
+ username, password, **params):
+ params['engine'] = db_engine
+ try:
+ result = self.connection.create_dbinstance(instance_name, size, instance_class,
+ username, password, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def create_db_instance_read_replica(self, instance_name, source_instance, **params):
+ try:
+ result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def delete_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.delete_dbinstance(instance_name, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def delete_db_snapshot(self, snapshot):
+ try:
+ result = self.connection.delete_dbsnapshot(snapshot)
+ return RDSSnapshot(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def modify_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.modify_dbinstance(instance_name, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def reboot_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.reboot_dbinstance(instance_name)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
+ try:
+ result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def create_db_snapshot(self, snapshot, instance_name, **params):
+ try:
+ result = self.connection.create_dbsnapshot(snapshot, instance_name)
+ return RDSSnapshot(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def promote_read_replica(self, instance_name, **params):
+ try:
+ result = self.connection.promote_read_replica(instance_name, **params)
+ return RDSDBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+
+class RDS2Connection:
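+ """Wrapper around the boto.rds2 API (boto >= 2.26.0); responses are parsed JSON dictionaries."""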
+ def __init__(self, module, region, **aws_connect_params):
+ try:
+ self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ def get_db_instance(self, instancename):
+ try:
+ dbinstances = self.connection.describe_db_instances(
+ db_instance_identifier=instancename
+ )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
+ result = RDS2DBInstance(dbinstances[0])
+ return result
+ except boto.rds2.exceptions.DBInstanceNotFound:
+ return None
+
+ def get_db_snapshot(self, snapshotid):
+ try:
+ snapshots = self.connection.describe_db_snapshots(
+ db_snapshot_identifier=snapshotid,
+ snapshot_type='manual'
+ )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
+ result = RDS2Snapshot(snapshots[0])
+ return result
+ except boto.rds2.exceptions.DBSnapshotNotFound:
+ return None
+
+ def create_db_instance(self, instance_name, size, instance_class, db_engine,
+ username, password, **params):
+ try:
+ result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
+ **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def create_db_instance_read_replica(self, instance_name, source_instance, **params):
+ try:
+ result = self.connection.create_db_instance_read_replica(
+ instance_name,
+ source_instance,
+ **params
+ )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def delete_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def delete_db_snapshot(self, snapshot):
+ try:
+ result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
+ return RDS2Snapshot(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def modify_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def reboot_db_instance(self, instance_name, **params):
+ try:
+ result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
+ try:
+ result = self.connection.restore_db_instance_from_db_snapshot(
+ instance_name,
+ snapshot,
+ **params
+ )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def create_db_snapshot(self, snapshot, instance_name, **params):
+ try:
+ result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
+ return RDS2Snapshot(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+ def promote_read_replica(self, instance_name, **params):
+ try:
+ result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
+ return RDS2DBInstance(result)
+ except boto.exception.BotoServerError as e:
+ raise RDSException(e)
+
+
+class RDSDBInstance:
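+ """Facade over a boto.rds DBInstance object, exposing a normalized dict via get_data()."""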
+ def __init__(self, dbinstance):
+ self.instance = dbinstance
+ self.name = dbinstance.id
+ self.status = dbinstance.status
+
+ def get_data(self):
+ d = {
+ 'id': self.name,
+ 'create_time': self.instance.create_time,
+ 'status': self.status,
+ 'availability_zone': self.instance.availability_zone,
+ 'backup_retention': self.instance.backup_retention_period,
+ 'backup_window': self.instance.preferred_backup_window,
+ 'maintenance_window': self.instance.preferred_maintenance_window,
+ 'multi_zone': self.instance.multi_az,
+ 'instance_type': self.instance.instance_class,
+ 'username': self.instance.master_username,
+ 'iops': self.instance.iops
+ }
+
+ # Only assign an Endpoint if one is available
+ if hasattr(self.instance, 'endpoint'):
+ d["endpoint"] = self.instance.endpoint[0]
+ d["port"] = self.instance.endpoint[1]
+ if self.instance.vpc_security_groups is not None:
+ d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
+ else:
+ d["vpc_security_groups"] = None
+ else:
+ d["endpoint"] = None
+ d["port"] = None
+ d["vpc_security_groups"] = None
+ d['DBName'] = self.instance.DBName if hasattr(self.instance, 'DBName') else None
+ # ReadReplicaSourceDBInstanceIdentifier may or may not exist
+ try:
+ d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
+ except Exception:
+ d["replication_source"] = None
+ return d
+
+
+class RDS2DBInstance:
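+ """Facade over a boto.rds2 instance dictionary, exposing a normalized dict via get_data()."""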
+ def __init__(self, dbinstance):
+ self.instance = dbinstance
+ self.name = dbinstance.get('DBInstanceIdentifier')
+ self.status = self.instance.get('DBInstanceStatus')
+
+ def get_data(self):
+ d = {
+ 'id': self.name,
+ 'create_time': self.instance['InstanceCreateTime'],
+ 'engine': self.instance['Engine'],
+ 'engine_version': self.instance['EngineVersion'],
+ 'license_model': self.instance['LicenseModel'],
+ 'character_set_name': self.instance['CharacterSetName'],
+ 'allocated_storage': self.instance['AllocatedStorage'],
+ 'publicly_accessible': self.instance['PubliclyAccessible'],
+ 'latest_restorable_time': self.instance['LatestRestorableTime'],
+ 'status': self.status,
+ 'availability_zone': self.instance['AvailabilityZone'],
+ 'secondary_availability_zone': self.instance['SecondaryAvailabilityZone'],
+ 'backup_retention': self.instance['BackupRetentionPeriod'],
+ 'backup_window': self.instance['PreferredBackupWindow'],
+ 'maintenance_window': self.instance['PreferredMaintenanceWindow'],
+ 'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'],
+ 'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'],
+ 'multi_zone': self.instance['MultiAZ'],
+ 'instance_type': self.instance['DBInstanceClass'],
+ 'username': self.instance['MasterUsername'],
+ 'db_name': self.instance['DBName'],
+ 'iops': self.instance['Iops'],
+ 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
+ }
+ if self.instance['DBParameterGroups'] is not None:
+ parameter_groups = []
+ for x in self.instance['DBParameterGroups']:
+ parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']})
+ d['parameter_groups'] = parameter_groups
+ if self.instance['OptionGroupMemberships'] is not None:
+ option_groups = []
+ for x in self.instance['OptionGroupMemberships']:
+ option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']})
+ d['option_groups'] = option_groups
+ if self.instance['PendingModifiedValues'] is not None:
+ pdv = self.instance['PendingModifiedValues']
+ d['pending_modified_values'] = {
+ 'multi_az': pdv['MultiAZ'],
+ 'master_user_password': pdv['MasterUserPassword'],
+ 'port': pdv['Port'],
+ 'iops': pdv['Iops'],
+ 'allocated_storage': pdv['AllocatedStorage'],
+ 'engine_version': pdv['EngineVersion'],
+ 'backup_retention_period': pdv['BackupRetentionPeriod'],
+ 'db_instance_class': pdv['DBInstanceClass'],
+ 'db_instance_identifier': pdv['DBInstanceIdentifier']
+ }
+ if self.instance["DBSubnetGroup"] is not None:
+ dsg = self.instance["DBSubnetGroup"]
+ db_subnet_groups = {}
+ db_subnet_groups['vpc_id'] = dsg['VpcId']
+ db_subnet_groups['name'] = dsg['DBSubnetGroupName']
+ db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
+ db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
+ db_subnet_groups['subnets'] = []
+ for x in dsg["Subnets"]:
+ db_subnet_groups['subnets'].append({
+ 'status': x['SubnetStatus'].lower(),
+ 'identifier': x['SubnetIdentifier'],
+ 'availability_zone': {
+ 'name': x['SubnetAvailabilityZone']['Name'],
+ 'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
+ }
+ })
+ d['db_subnet_groups'] = db_subnet_groups
+ if self.instance["VpcSecurityGroups"] is not None:
+ d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
+ if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
+ d['endpoint'] = self.instance["Endpoint"].get('Address', None)
+ d['port'] = self.instance["Endpoint"].get('Port', None)
+ else:
+ d['endpoint'] = None
+ d['port'] = None
+ d['DBName'] = self.instance.get('DBName')
+ return d
+
+
+class RDSSnapshot:
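+ """Facade over a boto.rds DBSnapshot object."""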
+ def __init__(self, snapshot):
+ self.snapshot = snapshot
+ self.name = snapshot.id
+ self.status = snapshot.status
+
+ def get_data(self):
+ d = {
+ 'id': self.name,
+ 'create_time': self.snapshot.snapshot_create_time,
+ 'status': self.status,
+ 'availability_zone': self.snapshot.availability_zone,
+ 'instance_id': self.snapshot.instance_id,
+ 'instance_created': self.snapshot.instance_create_time,
+ }
+ # needs boto >= 2.21.0
+ if hasattr(self.snapshot, 'snapshot_type'):
+ d["snapshot_type"] = self.snapshot.snapshot_type
+ if hasattr(self.snapshot, 'iops'):
+ d["iops"] = self.snapshot.iops
+ return d
+
+
+class RDS2Snapshot:
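+ """Facade over a boto.rds2 snapshot dictionary."""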
+ def __init__(self, snapshot):
+ if 'DeleteDBSnapshotResponse' in snapshot:
+ self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
+ else:
+ self.snapshot = snapshot
+ self.name = self.snapshot.get('DBSnapshotIdentifier')
+ self.status = self.snapshot.get('Status')
+
+ def get_data(self):
+ d = {
+ 'id': self.name,
+ 'create_time': self.snapshot['SnapshotCreateTime'],
+ 'status': self.status,
+ 'availability_zone': self.snapshot['AvailabilityZone'],
+ 'instance_id': self.snapshot['DBInstanceIdentifier'],
+ 'instance_created': self.snapshot['InstanceCreateTime'],
+ 'snapshot_type': self.snapshot['SnapshotType'],
+ 'iops': self.snapshot['Iops'],
+ }
+ return d
+
+
+def await_resource(conn, resource, status, module):
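+ """Poll the instance or snapshot until it reaches 'status', failing once wait_timeout expires."""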
+ start_time = time.time()
+ wait_timeout = module.params.get('wait_timeout') + start_time
+ check_interval = 5
+ while wait_timeout > time.time() and resource.status != status:
+ time.sleep(check_interval)
+ if wait_timeout <= time.time():
+ module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
+ if module.params.get('command') == 'snapshot':
+ # Temporary until all the rds2 commands have their responses parsed
+ if resource.name is None:
+ module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
+ # Back off if we're getting throttled, since we're just waiting anyway
+ resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
+ else:
+ # Temporary until all the rds2 commands have their responses parsed
+ if resource.name is None:
+ module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
+ # Back off if we're getting throttled, since we're just waiting anyway
+ resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
+ if resource is None:
+ break
+ # Some RDS resources take much longer than others to be ready. Check
+ # less aggressively for slow ones to avoid throttling.
+ if time.time() > start_time + 90:
+ check_interval = 20
+ return resource
+
+
+def create_db_instance(module, conn):
+ required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
+ valid_vars = ['backup_retention', 'backup_window',
+ 'character_set_name', 'db_name', 'engine_version',
+ 'instance_type', 'iops', 'license_model', 'maint_window',
+ 'multi_zone', 'option_group', 'parameter_group', 'port',
+ 'subnet', 'upgrade', 'zone']
+ if module.params.get('subnet'):
+ valid_vars.append('vpc_security_groups')
+ else:
+ valid_vars.append('security_groups')
+ if HAS_RDS2:
+ valid_vars.extend(['publicly_accessible', 'tags'])
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+
+ result = conn.get_db_instance(instance_name)
+ if result:
+ changed = False
+ else:
+ try:
+ result = conn.create_db_instance(instance_name, module.params.get('size'),
+ module.params.get('instance_type'), module.params.get('db_engine'),
+ module.params.get('username'), module.params.get('password'), **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg="Failed to create instance: %s" % to_native(e))
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def replicate_db_instance(module, conn):
+ required_vars = ['instance_name', 'source_instance']
+ valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
+ if HAS_RDS2:
+ valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ source_instance = module.params.get('source_instance')
+
+ result = conn.get_db_instance(instance_name)
+ if result:
+ changed = False
+ else:
+ try:
+ result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg="Failed to create replica instance: %s " % to_native(e))
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def delete_db_instance_or_snapshot(module, conn):
+ required_vars = []
+ valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ snapshot = module.params.get('snapshot')
+
+ if not instance_name:
+ result = conn.get_db_snapshot(snapshot)
+ else:
+ result = conn.get_db_instance(instance_name)
+ if not result:
+ module.exit_json(changed=False)
+ if result.status == 'deleting':
+ module.exit_json(changed=False)
+ try:
+ if instance_name:
+ if snapshot:
+ params["skip_final_snapshot"] = False
+ if HAS_RDS2:
+ params["final_db_snapshot_identifier"] = snapshot
+ else:
+ params["final_snapshot_id"] = snapshot
+ else:
+ params["skip_final_snapshot"] = True
+ result = conn.delete_db_instance(instance_name, **params)
+ else:
+ result = conn.delete_db_snapshot(snapshot)
+ except RDSException as e:
+ module.fail_json(msg="Failed to delete instance: %s" % to_native(e))
+
+ # If we're not waiting for a delete to complete then we're all done
+ # so just return
+ if not module.params.get('wait'):
+ module.exit_json(changed=True)
+ try:
+ await_resource(conn, result, 'deleted', module)
+ module.exit_json(changed=True)
+ except RDSException as e:
+ if e.code == 'DBInstanceNotFound':
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=to_native(e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def facts_db_instance_or_snapshot(module, conn):
+ instance_name = module.params.get('instance_name')
+ snapshot = module.params.get('snapshot')
+
+ if instance_name and snapshot:
+ module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
+ if instance_name:
+ resource = conn.get_db_instance(instance_name)
+ if not resource:
+ module.fail_json(msg="DB instance %s does not exist" % instance_name)
+ if snapshot:
+ resource = conn.get_db_snapshot(snapshot)
+ if not resource:
+ module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
+
+ module.exit_json(changed=False, instance=resource.get_data())
+
+
+def modify_db_instance(module, conn):
+ required_vars = ['instance_name']
+ valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
+ 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
+ 'maint_window', 'multi_zone', 'new_instance_name',
+ 'option_group', 'parameter_group', 'password', 'size', 'upgrade']
+
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ new_instance_name = module.params.get('new_instance_name')
+
+ try:
+ result = conn.modify_db_instance(instance_name, **params)
+ except RDSException as e:
+ module.fail_json(msg=to_native(e))
+ if params.get('apply_immediately'):
+ if new_instance_name:
+ # Wait until the new instance name is valid
+ new_instance = None
+ while not new_instance:
+ new_instance = conn.get_db_instance(new_instance_name)
+ time.sleep(5)
+
+ # Found instance but it briefly flicks to available
+ # before rebooting so let's wait until we see it rebooting
+ # before we check whether to 'wait'
+ result = await_resource(conn, new_instance, 'rebooting', module)
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ # guess that this changed the DB, need a way to check
+ module.exit_json(changed=True, instance=resource.get_data())
+
+
+def promote_db_instance(module, conn):
+ required_vars = ['instance_name']
+ valid_vars = ['backup_retention', 'backup_window']
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+
+ result = conn.get_db_instance(instance_name)
+ if not result:
+ module.fail_json(msg="DB Instance %s does not exist" % instance_name)
+
+ if result.get_data().get('replication_source'):
+ try:
+ result = conn.promote_read_replica(instance_name, **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg=to_native(e))
+ else:
+ changed = False
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def snapshot_db_instance(module, conn):
+ required_vars = ['instance_name', 'snapshot']
+ valid_vars = ['tags']
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ snapshot = module.params.get('snapshot')
+ changed = False
+ result = conn.get_db_snapshot(snapshot)
+ if not result:
+ try:
+ result = conn.create_db_snapshot(snapshot, instance_name, **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_snapshot(snapshot)
+
+ module.exit_json(changed=changed, snapshot=resource.get_data())
+
+
+def reboot_db_instance(module, conn):
+ required_vars = ['instance_name']
+ valid_vars = []
+
+ if HAS_RDS2:
+ valid_vars.append('force_failover')
+
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ result = conn.get_db_instance(instance_name)
+ changed = False
+ try:
+ result = conn.reboot_db_instance(instance_name, **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def restore_db_instance(module, conn):
+ required_vars = ['instance_name', 'snapshot']
+ valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
+ 'option_group', 'port', 'publicly_accessible',
+ 'subnet', 'tags', 'upgrade', 'zone']
+ if HAS_RDS2:
+ valid_vars.append('instance_type')
+ else:
+ required_vars.append('instance_type')
+ params = validate_parameters(required_vars, valid_vars, module)
+ instance_name = module.params.get('instance_name')
+ instance_type = module.params.get('instance_type')
+ snapshot = module.params.get('snapshot')
+
+ changed = False
+ result = conn.get_db_instance(instance_name)
+ if not result:
+ try:
+ result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
+ changed = True
+ except RDSException as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('wait'):
+ resource = await_resource(conn, result, 'available', module)
+ else:
+ resource = conn.get_db_instance(instance_name)
+
+ module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def validate_parameters(required_vars, valid_vars, module):
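+ """Fail on missing required options for the current command and translate
+ module options into the keyword arguments the active boto RDS API expects."""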
+ command = module.params.get('command')
+ for v in required_vars:
+ if not module.params.get(v):
+ module.fail_json(msg="Parameter %s required for %s command" % (v, command))
+
+ # map to convert rds module options to boto rds and rds2 options
+ optional_params = {
+ 'port': 'port',
+ 'db_name': 'db_name',
+ 'zone': 'availability_zone',
+ 'maint_window': 'preferred_maintenance_window',
+ 'backup_window': 'preferred_backup_window',
+ 'backup_retention': 'backup_retention_period',
+ 'multi_zone': 'multi_az',
+ 'engine_version': 'engine_version',
+ 'upgrade': 'auto_minor_version_upgrade',
+ 'subnet': 'db_subnet_group_name',
+ 'license_model': 'license_model',
+ 'option_group': 'option_group_name',
+ 'size': 'allocated_storage',
+ 'iops': 'iops',
+ 'new_instance_name': 'new_instance_id',
+ 'apply_immediately': 'apply_immediately',
+ }
+ # map to convert rds module options to boto rds options
+ optional_params_rds = {
+ 'db_engine': 'engine',
+ 'password': 'master_password',
+ 'parameter_group': 'param_group',
+ 'instance_type': 'instance_class',
+ }
+ # map to convert rds module options to boto rds2 options
+ optional_params_rds2 = {
+ 'tags': 'tags',
+ 'publicly_accessible': 'publicly_accessible',
+ 'parameter_group': 'db_parameter_group_name',
+ 'character_set_name': 'character_set_name',
+ 'instance_type': 'db_instance_class',
+ 'password': 'master_user_password',
+ 'new_instance_name': 'new_db_instance_identifier',
+ 'force_failover': 'force_failover',
+ }
+ if HAS_RDS2:
+ optional_params.update(optional_params_rds2)
+ sec_group = 'db_security_groups'
+ else:
+ optional_params.update(optional_params_rds)
+ sec_group = 'security_groups'
+ # Check for options only supported with rds2
+ for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
+ if module.params.get(k):
+ module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
+
+ params = {}
+ for (k, v) in optional_params.items():
+ if module.params.get(k) is not None and k not in required_vars:
+ if k in valid_vars:
+ params[v] = module.params[k]
+ elif module.params.get(k) is not False:
+ module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
+
+ if module.params.get('security_groups'):
+ params[sec_group] = module.params.get('security_groups').split(',')
+
+ vpc_groups = module.params.get('vpc_security_groups')
+ if vpc_groups:
+ if HAS_RDS2:
+ params['vpc_security_group_ids'] = vpc_groups
+ else:
+ groups_list = []
+ for x in vpc_groups:
+ groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
+ params['vpc_security_groups'] = groups_list
+
+ # Convert the tags dict to the list of tuples that rds2 expects
+ if 'tags' in params:
+ params['tags'] = list(module.params['tags'].items())
+ return params
+
+
+def main():
+ argument_spec = dict(
+ command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
+ instance_name=dict(required=False),
+ source_instance=dict(required=False),
+ db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
+ 'sqlserver-web', 'postgres', 'aurora'], required=False),
+ size=dict(required=False),
+ instance_type=dict(aliases=['type'], required=False),
+ username=dict(required=False),
+ password=dict(no_log=True, required=False),
+ db_name=dict(required=False),
+ engine_version=dict(required=False),
+ parameter_group=dict(required=False),
+ license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
+ multi_zone=dict(type='bool', required=False),
+ iops=dict(required=False),
+ security_groups=dict(required=False),
+ vpc_security_groups=dict(type='list', required=False, elements='str'),
+ port=dict(required=False, type='int'),
+ upgrade=dict(type='bool', default=False),
+ option_group=dict(required=False),
+ maint_window=dict(required=False),
+ backup_window=dict(required=False),
+ backup_retention=dict(required=False),
+ zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
+ subnet=dict(required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ snapshot=dict(required=False),
+ apply_immediately=dict(type='bool', default=False),
+ new_instance_name=dict(required=False),
+ tags=dict(type='dict', required=False),
+ publicly_accessible=dict(required=False),
+ character_set_name=dict(required=False),
+ force_failover=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
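+ # Dispatch table: maps each value of 'command' to its handler function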
+ invocations = {
+ 'create': create_db_instance,
+ 'replicate': replicate_db_instance,
+ 'delete': delete_db_instance_or_snapshot,
+ 'facts': facts_db_instance_or_snapshot,
+ 'modify': modify_db_instance,
+ 'promote': promote_db_instance,
+ 'snapshot': snapshot_db_instance,
+ 'reboot': reboot_db_instance,
+ 'restore': restore_db_instance,
+ }
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ # set port to per db defaults if not specified
+ if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
+ if '-' in module.params['db_engine']:
+ engine = module.params['db_engine'].split('-')[0]
+ else:
+ engine = module.params['db_engine']
+ module.params['port'] = DEFAULT_PORTS[engine.lower()]
+
+ # connect to the rds endpoint
+ if HAS_RDS2:
+ conn = RDS2Connection(module, region, **aws_connect_params)
+ else:
+ conn = RDSConnection(module, region, **aws_connect_params)
+
+ invocations[module.params.get('command')](module, conn)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance.py
new file mode 100644
index 00000000..3aa9c7f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance.py
@@ -0,0 +1,1234 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_instance
+version_added: 1.0.0
+short_description: Manage RDS instances
+description:
+ - Create, modify, and delete RDS instances.
+
+requirements:
+ - botocore
+ - boto3 >= 1.5.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author:
+ - Sloane Hertel (@s-hertel)
+
+options:
+ # General module options
+ state:
+ description:
+ - Whether the DB instance should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state
+ and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state,
+ (running if creating the DB instance).
+ - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note - rebooting the instance
+ is not idempotent.
+ choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted']
+ default: 'present'
+ type: str
+ creation_source:
+ description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot).
+ choices: ['snapshot', 's3', 'instance']
+ type: str
+ force_update_password:
+ description:
+ - Set to True to update your cluster password with I(master_user_password). Since comparing passwords to determine
+ if it needs to be updated is not possible this is set to False by default to allow idempotence.
+ type: bool
+ default: False
+ purge_cloudwatch_logs_exports:
+ description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+ type: bool
+ default: True
+ purge_tags:
+ description: Set to False to retain any tags that aren't specified in task and are associated with the instance.
+ type: bool
+ default: True
+ read_replica:
+ description:
+ - Set to False to promote a read replica or True to create one. When creating a read replica C(creation_source) should
+ be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option.
+ type: bool
+ wait:
+ description:
+ - Whether to wait for the cluster to be available, stopped, or deleted. At a later time a wait_timeout option may be added.
+ Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches
+ the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the
+ instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting).
+ If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications.
+ type: bool
+ default: True
+
+ # Options that have a corresponding boto3 parameter
+ allocated_storage:
+ description:
+ - The amount of storage (in gibibytes) to allocate for the DB instance.
+ type: int
+ allow_major_version_upgrade:
+ description:
+ - Whether to allow major version upgrades.
+ type: bool
+ apply_immediately:
+ description:
+ - A value that specifies whether modifying a cluster with I(new_db_instance_identifier) and I(master_user_password)
+ should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes
+ are applied during the next maintenance window.
+ type: bool
+ default: False
+ auto_minor_version_upgrade:
+ description:
+ - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window.
+ type: bool
+ availability_zone:
+ description:
+ - The Availability Zone in which the DB instance will be created.
+ May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
+ aliases:
+ - az
+ - zone
+ type: str
+ backup_retention_period:
+ description:
+ - The number of days for which automated backups are retained.
+ - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source to read replicas)
+ - May be used when creating a new cluster, when restoring from S3, or when modifying a cluster.
+ type: int
+ ca_certificate_identifier:
+ description:
+ - The identifier of the CA certificate for the DB instance.
+ type: str
+ character_set_name:
+ description:
+ - The character set to associate with the DB instance.
+ type: str
+ copy_tags_to_snapshot:
+ description:
+ - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating
+ a DB instance the RDS API defaults this to false if unspecified.
+ type: bool
+ db_cluster_identifier:
+ description:
+ - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to
+ 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
+ contain consecutive hyphens.
+ aliases:
+ - cluster_id
+ type: str
+ db_instance_class:
+ description:
+ - The compute and memory capacity of the DB instance, for example db.t2.micro.
+ aliases:
+ - class
+ - instance_type
+ type: str
+ db_instance_identifier:
+ description:
+ - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
+ hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
+ aliases:
+ - instance_id
+ - id
+ required: True
+ type: str
+ db_name:
+ description:
+ - The name for your database. If a name is not provided Amazon RDS will not create a database.
+ type: str
+ db_parameter_group_name:
+ description:
+ - The name of the DB parameter group to associate with this DB instance. When creating the DB instance if this
+ argument is omitted the default DBParameterGroup for the specified engine is used.
+ type: str
+ db_security_groups:
+ description:
+ - (EC2-Classic platform) A list of DB security groups to associate with this DB instance.
+ type: list
+ elements: str
+ db_snapshot_identifier:
+ description:
+ - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot).
+ type: str
+ db_subnet_group_name:
+ description:
+ - The DB subnet group name to use for the DB instance.
+ aliases:
+ - subnet_group
+ type: str
+ domain:
+ description:
+ - The Active Directory Domain to restore the instance in.
+ type: str
+ domain_iam_role_name:
+ description:
+ - The name of the IAM role to be used when making API calls to the Directory Service.
+ type: str
+ enable_cloudwatch_logs_exports:
+ description:
+ - A list of log types that need to be enabled for exporting to CloudWatch Logs.
+ aliases:
+ - cloudwatch_log_exports
+ type: list
+ elements: str
+ enable_iam_database_authentication:
+ description:
+ - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
+ If this option is omitted when creating the instance, Amazon RDS sets this to False.
+ type: bool
+ enable_performance_insights:
+ description:
+ - Whether to enable Performance Insights for the DB instance.
+ type: bool
+ engine:
+ description:
+ - The name of the database engine to be used for this DB instance. This is required to create an instance.
+ Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se |
+ oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web
+ type: str
+ engine_version:
+ description:
+ - The version number of the database engine to use. For Aurora MySQL, examples are C(5.6.10a) and C(5.7.12);
+ for Aurora PostgreSQL, C(9.6.3).
+ type: str
+ final_db_snapshot_identifier:
+ description:
+ - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false.
+ aliases:
+ - final_snapshot_identifier
+ type: str
+ force_failover:
+ description:
+ - Set to true to conduct the reboot through a MultiAZ failover.
+ type: bool
+ iops:
+ description:
+ - The Provisioned IOPS (I/O operations per second) value. Only set when I(storage_type) is set to C(io1).
+ type: int
+ kms_key_id:
+ description:
+      - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the
+        same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
+        alias instead of the ARN for the KMS encryption key.
+      - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used.
+ type: str
+ license_model:
+ description:
+ - The license model for the DB instance.
+ - Several options are license-included, bring-your-own-license, and general-public-license.
+ - This option can also be omitted to default to an accepted value.
+ type: str
+ master_user_password:
+ description:
+      - An 8-41 character password for the master database user. The password can contain any printable ASCII character
+        except "/", '"', or "@". To modify the password, use I(force_update_password). Use I(apply_immediately) to change
+        the password immediately, otherwise it is updated during the next maintenance window.
+ aliases:
+ - password
+ type: str
+ master_username:
+ description:
+      - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter.
+ aliases:
+ - username
+ type: str
+ max_allocated_storage:
+ description:
+ - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+ type: int
+ monitoring_interval:
+ description:
+      - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
+        To disable collecting metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
+ type: int
+ monitoring_role_arn:
+ description:
+ - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
+ type: str
+ multi_az:
+ description:
+ - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
+ type: bool
+ new_db_instance_identifier:
+ description:
+      - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must
+        contain from 1 to 63 letters, numbers, or hyphens; the first character must be a letter, and the identifier may
+        not end in a hyphen or contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it
+        is updated during the next maintenance window.
+ aliases:
+ - new_instance_id
+ - new_id
+ type: str
+ option_group_name:
+ description:
+ - The option group to associate with the DB instance.
+ type: str
+ performance_insights_kms_key_id:
+ description:
+ - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
+ type: str
+ performance_insights_retention_period:
+ description:
+ - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
+ type: int
+ port:
+ description:
+      - The port number on which the instance accepts connections.
+ type: int
+ preferred_backup_window:
+ description:
+ - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+ enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+ I(preferred_maintenance_window).
+ aliases:
+ - backup_window
+ type: str
+ preferred_maintenance_window:
+ description:
+ - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+ be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+ aliases:
+ - maintenance_window
+ type: str
+ processor_features:
+ description:
+ - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
+ DB instance class of the DB instance. Names are threadsPerCore and coreCount.
+ Set this option to an empty dictionary to use the default processor features.
+ suboptions:
+ threadsPerCore:
+ description: The number of threads per core
+ coreCount:
+ description: The number of CPU cores
+ type: dict
+ promotion_tier:
+ description:
+ - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of
+ the existing primary instance.
+ type: str
+ publicly_accessible:
+ description:
+ - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with
+ a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal
+ instance with a DNS name that resolves to a private IP address.
+ type: bool
+ restore_time:
+ description:
+ - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance.
+ For example, "2009-09-07T23:45:00Z".
+      - May alternatively set I(use_latest_restorable_time=True).
+ - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+ type: str
+ s3_bucket_name:
+ description:
+ - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance.
+ type: str
+ s3_ingestion_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
+ the Amazon S3 bucket on your behalf.
+ type: str
+ s3_prefix:
+ description:
+ - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not
+ specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket.
+ type: str
+ skip_final_snapshot:
+ description:
+      - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false,
+        I(final_db_snapshot_identifier) must be provided.
+ type: bool
+ default: false
+ snapshot_identifier:
+ description:
+ - The ARN of the DB snapshot to restore from when using I(creation_source=snapshot).
+ type: str
+ source_db_instance_identifier:
+ description:
+ - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time
+ DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN.
+ type: str
+ source_engine:
+ description:
+ - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
+ choices:
+ - mysql
+ type: str
+ source_engine_version:
+ description:
+ - The version of the database that the backup files were created from.
+ type: str
+ source_region:
+ description:
+ - The region of the DB instance from which the replica is created.
+ type: str
+ storage_encrypted:
+ description:
+ - Whether the DB instance is encrypted.
+ type: bool
+ storage_type:
+ description:
+ - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances.
+ choices:
+ - standard
+ - gp2
+ - io1
+ type: str
+ tags:
+ description:
+      - A dictionary of key value pairs to assign to the DB instance.
+ type: dict
+ tde_credential_arn:
+ description:
+      - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is
+        supported by Oracle and SQL Server DB instances and may be used in conjunction with I(storage_encrypted),
+        though it might slightly affect the performance of your database.
+ aliases:
+ - transparent_data_encryption_arn
+ type: str
+ tde_credential_password:
+ description:
+ - The password for the given ARN from the key store in order to access the device.
+ aliases:
+ - transparent_data_encryption_password
+ type: str
+ timezone:
+ description:
+ - The time zone of the DB instance.
+ type: str
+ use_latest_restorable_time:
+ description:
+ - Whether to restore the DB instance to the latest restorable backup time.
+ - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+ type: bool
+ aliases:
+ - restore_from_latest
+ vpc_security_group_ids:
+ description:
+      - A list of EC2 VPC security groups to associate with the DB instance.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: create minimal aurora instance in default VPC and default subnet group
+ community.aws.rds_instance:
+ engine: aurora
+ db_instance_identifier: ansible-test-aurora-db-instance
+ instance_type: db.t2.small
+ password: "{{ password }}"
+ username: "{{ username }}"
+ cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it
+
+- name: Create a DB instance using the default AWS KMS encryption key
+ community.aws.rds_instance:
+ id: test-encrypted-db
+ state: present
+ engine: mariadb
+ storage_encrypted: True
+ db_instance_class: db.t2.medium
+ username: "{{ username }}"
+ password: "{{ password }}"
+ allocated_storage: "{{ allocated_storage }}"
+
+- name: remove the DB instance without a final snapshot
+ community.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+
+- name: remove the DB instance with a final snapshot
+ community.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ final_snapshot_identifier: "{{ snapshot_id }}"
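+
+# A sketch of creating a read replica; the source instance named here is assumed to
+# already exist (use an ARN and source_region if it lives in another region).
+- name: Create a read replica of an existing DB instance
+  community.aws.rds_instance:
+    id: ansible-test-db-replica
+    read_replica: True
+    source_db_instance_identifier: ansible-test-db
+    state: present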
+'''
+
+RETURN = r'''
+allocated_storage:
+  description: The allocated storage size in gibibytes. This is always 1 for Aurora database engines.
+ returned: always
+ type: int
+ sample: 20
+auto_minor_version_upgrade:
+ description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
+ returned: always
+ type: bool
+ sample: true
+availability_zone:
+ description: The availability zone for the DB instance.
+ returned: always
+ type: str
+ sample: us-east-1f
+backup_retention_period:
+ description: The number of days for which automated backups are retained.
+ returned: always
+ type: int
+ sample: 1
+ca_certificate_identifier:
+ description: The identifier of the CA certificate for the DB instance.
+ returned: always
+ type: str
+ sample: rds-ca-2015
+copy_tags_to_snapshot:
+ description: Whether tags are copied from the DB instance to snapshots of the DB instance.
+ returned: always
+ type: bool
+ sample: false
+db_instance_arn:
+ description: The Amazon Resource Name (ARN) for the DB instance.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
+db_instance_class:
+ description: The name of the compute and memory capacity class of the DB instance.
+ returned: always
+ type: str
+ sample: db.m4.large
+db_instance_identifier:
+  description: The identifier of the DB instance.
+ returned: always
+ type: str
+ sample: ansible-test
+db_instance_port:
+ description: The port that the DB instance listens on.
+ returned: always
+ type: int
+ sample: 0
+db_instance_status:
+ description: The current state of this database.
+ returned: always
+ type: str
+ sample: stopped
+db_parameter_groups:
+ description: The list of DB parameter groups applied to this DB instance.
+ returned: always
+ type: complex
+ contains:
+ db_parameter_group_name:
+      description: The name of the DB parameter group.
+ returned: always
+ type: str
+ sample: default.mariadb10.0
+ parameter_apply_status:
+ description: The status of parameter updates.
+ returned: always
+ type: str
+ sample: in-sync
+db_security_groups:
+ description: A list of DB security groups associated with this DB instance.
+ returned: always
+ type: list
+ sample: []
+db_subnet_group:
+ description: The subnet group associated with the DB instance.
+ returned: always
+ type: complex
+ contains:
+ db_subnet_group_description:
+ description: The description of the DB subnet group.
+ returned: always
+ type: str
+ sample: default
+ db_subnet_group_name:
+ description: The name of the DB subnet group.
+ returned: always
+ type: str
+ sample: default
+ subnet_group_status:
+ description: The status of the DB subnet group.
+ returned: always
+ type: str
+ sample: Complete
+ subnets:
+ description: A list of Subnet elements.
+ returned: always
+ type: complex
+ contains:
+ subnet_availability_zone:
+ description: The availability zone of the subnet.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the Availability Zone.
+ returned: always
+ type: str
+ sample: us-east-1c
+ subnet_identifier:
+ description: The ID of the subnet.
+ returned: always
+ type: str
+ sample: subnet-12345678
+ subnet_status:
+ description: The status of the subnet.
+ returned: always
+ type: str
+ sample: Active
+ vpc_id:
+ description: The VpcId of the DB subnet group.
+ returned: always
+ type: str
+ sample: vpc-12345678
+dbi_resource_id:
+ description: The AWS Region-unique, immutable identifier for the DB instance.
+ returned: always
+ type: str
+ sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
+domain_memberships:
+ description: The Active Directory Domain membership records associated with the DB instance.
+ returned: always
+ type: list
+ sample: []
+endpoint:
+ description: The connection endpoint.
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: The DNS address of the DB instance.
+ returned: always
+ type: str
+ sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
+ hosted_zone_id:
+ description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+ returned: always
+ type: str
+ sample: ZTR2ITUGPA61AM
+ port:
+ description: The port that the database engine is listening on.
+ returned: always
+ type: int
+ sample: 3306
+engine:
+  description: The database engine.
+ returned: always
+ type: str
+ sample: mariadb
+engine_version:
+ description: The database engine version.
+ returned: always
+ type: str
+ sample: 10.0.35
+iam_database_authentication_enabled:
+ description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
+ returned: always
+ type: bool
+ sample: false
+instance_create_time:
+ description: The date and time the DB instance was created.
+ returned: always
+ type: str
+ sample: '2018-07-04T16:48:35.332000+00:00'
+kms_key_id:
+ description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
+ returned: When storage_encrypted is true
+ type: str
+ sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
+latest_restorable_time:
+ description: The latest time to which a database can be restored with point-in-time restore.
+ returned: always
+ type: str
+ sample: '2018-07-04T16:50:50.642000+00:00'
+license_model:
+ description: The License model information for this DB instance.
+ returned: always
+ type: str
+ sample: general-public-license
+master_username:
+ description: The master username for the DB instance.
+ returned: always
+ type: str
+ sample: test
+max_allocated_storage:
+ description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+ returned: When max allocated storage is present.
+ type: int
+ sample: 100
+monitoring_interval:
+ description:
+ - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
+ 0 means collecting Enhanced Monitoring metrics is disabled.
+ returned: always
+ type: int
+ sample: 0
+multi_az:
+ description: Whether the DB instance is a Multi-AZ deployment.
+ returned: always
+ type: bool
+ sample: false
+option_group_memberships:
+ description: The list of option group memberships for this DB instance.
+ returned: always
+ type: complex
+ contains:
+ option_group_name:
+ description: The name of the option group that the instance belongs to.
+ returned: always
+ type: str
+ sample: default:mariadb-10-0
+ status:
+ description: The status of the DB instance's option group membership.
+ returned: always
+ type: str
+ sample: in-sync
+pending_modified_values:
+ description: The changes to the DB instance that are pending.
+ returned: always
+ type: complex
+ contains: {}
+performance_insights_enabled:
+  description: True if Performance Insights is enabled for the DB instance, false otherwise.
+ returned: always
+ type: bool
+ sample: false
+preferred_backup_window:
+ description: The daily time range during which automated backups are created if automated backups are enabled.
+ returned: always
+ type: str
+ sample: 07:01-07:31
+preferred_maintenance_window:
+ description: The weekly time range (in UTC) during which system maintenance can occur.
+ returned: always
+ type: str
+ sample: sun:09:31-sun:10:01
+publicly_accessible:
+ description:
+ - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
+ internal instance with a DNS name that resolves to a private IP address.
+ returned: always
+ type: bool
+ sample: true
+read_replica_db_instance_identifiers:
+ description: Identifiers of the Read Replicas associated with this DB instance.
+ returned: always
+ type: list
+ sample: []
+storage_encrypted:
+ description: Whether the DB instance is encrypted.
+ returned: always
+ type: bool
+ sample: false
+storage_type:
+ description: The storage type to be associated with the DB instance.
+ returned: always
+ type: str
+ sample: standard
+tags:
+ description: A dictionary of tags associated with the DB instance.
+ returned: always
+ type: complex
+ contains: {}
+vpc_security_groups:
+ description: A list of VPC security group elements that the DB instance belongs to.
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: The status of the VPC security group.
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+      description: The ID of the VPC security group.
+ returned: always
+ type: str
+ sample: sg-12345678
+'''
+
+from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.rds import (
+ arg_spec_to_rds_params,
+ call_method,
+ ensure_tags,
+ get_final_identifier,
+ get_rds_method_attribute,
+ get_tags,
+)
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry
+from ansible.module_utils.six import string_types
+
+from time import sleep
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_rds_method_attribute_name(instance, state, creation_source, read_replica):
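+    # Map the desired state and creation source onto the boto3 RDS client method to call:
+    # delete when absent/terminated, modify when the instance already exists, otherwise
+    # one of the create/restore variants. Returns None when no call is needed.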
+ method_name = None
+ if state == 'absent' or state == 'terminated':
+ if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']:
+ method_name = 'delete_db_instance'
+ else:
+ if instance:
+ method_name = 'modify_db_instance'
+ elif read_replica is True:
+ method_name = 'create_db_instance_read_replica'
+ elif creation_source == 'snapshot':
+ method_name = 'restore_db_instance_from_db_snapshot'
+ elif creation_source == 's3':
+ method_name = 'restore_db_instance_from_s3'
+ elif creation_source == 'instance':
+ method_name = 'restore_db_instance_to_point_in_time'
+ else:
+ method_name = 'create_db_instance'
+ return method_name
+
+
+def get_instance(client, module, db_instance_id):
+ try:
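+        # Retry the describe call a few times; a freshly created instance may not be
+        # visible to describe_db_instances immediately.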
+ for i in range(3):
+ try:
+ instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
+ instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
+ if instance.get('ProcessorFeatures'):
+ instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
+ (feature['Name'], feature['Value'])
+ for feature in instance['PendingModifiedValues']['ProcessorFeatures']
+ )
+ break
+ except is_boto3_error_code('DBInstanceNotFound'):
+ sleep(3)
+ else:
+ instance = {}
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe DB instances')
+ return instance
+
+
+def get_final_snapshot(client, module, snapshot_identifier):
+ try:
+ snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
+ if len(snapshots.get('DBSnapshots', [])) == 1:
+ return snapshots['DBSnapshots'][0]
+ return {}
+    except is_boto3_error_code('DBSnapshotNotFound'):  # May not be using wait: True
+ return {}
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
+
+
+def get_parameters(client, module, parameters, method_name):
+ if method_name == 'restore_db_instance_to_point_in_time':
+ parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier']
+
+ required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+ if any([parameters.get(k) is None for k in required_options]):
+ module.fail_json(msg='To {0} requires the parameters: {1}'.format(
+ get_rds_method_attribute(method_name, module).operation_description, required_options))
+ options = get_boto3_client_method_parameters(client, method_name)
+ parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+ if parameters.get('ProcessorFeatures') is not None:
+ parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()]
+
+ # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures)
+ if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance':
+ parameters.pop('ProcessorFeatures')
+
+ if method_name == 'create_db_instance' or method_name == 'create_db_instance_read_replica':
+ if parameters.get('Tags'):
+ parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
+
+ if method_name == 'modify_db_instance':
+ parameters = get_options_with_changing_values(client, module, parameters)
+
+ return parameters
+
+
+def get_options_with_changing_values(client, module, parameters):
+ instance_id = module.params['db_instance_identifier']
+ purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports']
+ force_update_password = module.params['force_update_password']
+ port = module.params['port']
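+    # ApplyImmediately is popped here and only re-added at the end if there are real
+    # changes, so an otherwise empty modify request is never sent.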
+ apply_immediately = parameters.pop('ApplyImmediately', None)
+ cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports']
+
+ if port:
+ parameters['DBPortNumber'] = port
+ if not force_update_password:
+ parameters.pop('MasterUserPassword', None)
+ if cloudwatch_logs_enabled:
+ parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled
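+    # When no storage_type was requested, do not send Iops along with the modify call.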
+ if not module.params['storage_type']:
+ parameters.pop('Iops', None)
+
+ instance = get_instance(client, module, instance_id)
+ updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs)
+ updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance))
+ parameters = updated_parameters
+
+ if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'):
+ if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately:
+ parameters.pop('NewDBInstanceIdentifier')
+
+ if parameters:
+ parameters['DBInstanceIdentifier'] = instance_id
+ if apply_immediately is not None:
+ parameters['ApplyImmediately'] = apply_immediately
+
+ return parameters
+
+
+def get_current_attributes_with_inconsistent_keys(instance):
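+    # Build a view of the instance's current settings keyed by modify_db_instance
+    # parameter names (which differ from the describe_db_instances response keys),
+    # preferring values already pending in PendingModifiedValues.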
+ options = {}
+ if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []):
+ current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable']
+ current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable']
+ options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled}
+ else:
+ options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []}
+ if instance.get('PendingModifiedValues', {}).get('Port'):
+ options['DBPortNumber'] = instance['PendingModifiedValues']['Port']
+ else:
+ options['DBPortNumber'] = instance['Endpoint']['Port']
+ if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'):
+ options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName']
+ else:
+ options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName']
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures']
+ else:
+ options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {})
+ options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']]
+ options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+ options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+ options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']]
+ options['AllowMajorVersionUpgrade'] = None
+ options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled']
+ # PerformanceInsightsEnabled is not returned on older RDS instances it seems
+ options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False)
+ options['MasterUserPassword'] = None
+ options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier']
+
+ return options
+
+
+def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs):
+ changing_params = {}
+ current_options = get_current_attributes_with_inconsistent_keys(instance)
+
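+    # Ensure the key exists so a requested max_allocated_storage is compared in the loop
+    # below and passed through; the helper above reports no current value for it.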
+ if current_options.get("MaxAllocatedStorage") is None:
+ current_options["MaxAllocatedStorage"] = None
+
+ for option in current_options:
+ current_option = current_options[option]
+ desired_option = modify_params.pop(option, None)
+ if desired_option is None:
+ continue
+
+ # TODO: allow other purge_option module parameters rather than just checking for things to add
+ if isinstance(current_option, list):
+ if isinstance(desired_option, list):
+ if set(desired_option) <= set(current_option):
+ continue
+ elif isinstance(desired_option, string_types):
+ if desired_option in current_option:
+ continue
+
+ if current_option == desired_option:
+ continue
+
+ if option == 'ProcessorFeatures' and desired_option == []:
+ changing_params['UseDefaultProcessorFeatures'] = True
+ elif option == 'CloudwatchLogsExportConfiguration':
+ current_option = set(current_option.get('LogTypesToEnable', []))
+ desired_option = set(desired_option)
+ format_option = {'EnableLogTypes': [], 'DisableLogTypes': []}
+ format_option['EnableLogTypes'] = list(desired_option.difference(current_option))
+ if purge_cloudwatch_logs:
+ format_option['DisableLogTypes'] = list(current_option.difference(desired_option))
+ if format_option['EnableLogTypes'] or format_option['DisableLogTypes']:
+ changing_params[option] = format_option
+ else:
+ changing_params[option] = desired_option
+
+ return changing_params
+
+
+def get_changing_options_with_consistent_keys(modify_params, instance):
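+    # For parameters whose names match the describe output directly, keep only those
+    # that differ from the current (or already pending) value.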
+    changing_params = {}
+
+ for param in modify_params:
+ current_option = instance.get('PendingModifiedValues', {}).get(param)
+ if current_option is None:
+ current_option = instance[param]
+ if modify_params[param] != current_option:
+ changing_params[param] = modify_params[param]
+
+ return changing_params
+
+
+def validate_options(client, module, instance):
+ state = module.params['state']
+ skip_final_snapshot = module.params['skip_final_snapshot']
+ snapshot_id = module.params['final_db_snapshot_identifier']
+ modified_id = module.params['new_db_instance_identifier']
+ engine = module.params['engine']
+ tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn'])
+ read_replica = module.params['read_replica']
+ creation_source = module.params['creation_source']
+ source_instance = module.params['source_db_instance_identifier']
+ if module.params['source_region'] is not None:
+ same_region = bool(module.params['source_region'] == module.params['region'])
+ else:
+ same_region = True
+
+ if modified_id:
+ modified_instance = get_instance(client, module, modified_id)
+ else:
+ modified_instance = {}
+
+ if modified_id and instance and modified_instance:
+ module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id))
+ if modified_id and not instance and modified_instance:
+ module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id))
+ if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None:
+ module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier')
+    if engine is not None and not (engine.startswith('sqlserver') or engine.startswith('oracle')) and tde_options:
+        module.fail_json(msg='TDE is available for SQL Server and Oracle DB instances')
+ if read_replica is True and not instance and creation_source not in [None, 'instance']:
+ module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source))
+ if read_replica is True and not instance and not source_instance:
+ module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier')
+
+
+def update_instance(client, module, instance, instance_id):
+ changed = False
+
+ # Get newly created DB instance
+ if not instance:
+ instance = get_instance(client, module, instance_id)
+
+ # Check tagging/promoting/rebooting/starting/stopping instance
+ changed |= ensure_tags(
+ client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags']
+ )
+ changed |= promote_replication_instance(client, module, instance, module.params['read_replica'])
+ changed |= update_instance_state(client, module, instance, module.params['state'])
+
+ return changed
+
+
+def promote_replication_instance(client, module, instance, read_replica):
+ changed = False
+ if read_replica is False:
+ changed = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos'))
+ if changed:
+ try:
+ call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']})
+ changed = True
+ except is_boto3_error_code('InvalidDBInstanceState') as e:
+ if 'DB Instance is not a read replica' in e.response['Error']['Message']:
+ pass
+ else:
+ raise e
+ return changed
+
+
+def update_instance_state(client, module, instance, state):
+ changed = False
+ if state in ['rebooted', 'restarted']:
+ changed |= reboot_running_db_instance(client, module, instance)
+ if state in ['started', 'running', 'stopped']:
+ changed |= start_or_stop_instance(client, module, instance, state)
+ return changed
+
+
+def reboot_running_db_instance(client, module, instance):
+ parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
+ if instance['DBInstanceStatus'] in ['stopped', 'stopping']:
+ call_method(client, module, 'start_db_instance', parameters)
+ if module.params.get('force_failover') is not None:
+ parameters['ForceFailover'] = module.params['force_failover']
+ results, changed = call_method(client, module, 'reboot_db_instance', parameters)
+ return changed
+
+
+def start_or_stop_instance(client, module, instance, state):
+ changed = False
+ parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
+ if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']:
+ if module.params['db_snapshot_identifier']:
+ parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier']
+ result, changed = call_method(client, module, 'stop_db_instance', parameters)
+ elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']:
+ result, changed = call_method(client, module, 'start_db_instance', parameters)
+ return changed
+
+
+def main():
+ arg_spec = dict(
+ state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'),
+ creation_source=dict(choices=['snapshot', 's3', 'instance']),
+ force_update_password=dict(type='bool', default=False, no_log=False),
+ purge_cloudwatch_logs_exports=dict(type='bool', default=True),
+ purge_tags=dict(type='bool', default=True),
+ read_replica=dict(type='bool'),
+ wait=dict(type='bool', default=True),
+ )
+
+ parameter_options = dict(
+ allocated_storage=dict(type='int'),
+ allow_major_version_upgrade=dict(type='bool'),
+ apply_immediately=dict(type='bool', default=False),
+ auto_minor_version_upgrade=dict(type='bool'),
+ availability_zone=dict(aliases=['az', 'zone']),
+ backup_retention_period=dict(type='int'),
+ ca_certificate_identifier=dict(),
+ character_set_name=dict(),
+ copy_tags_to_snapshot=dict(type='bool'),
+ db_cluster_identifier=dict(aliases=['cluster_id']),
+ db_instance_class=dict(aliases=['class', 'instance_type']),
+ db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']),
+ db_name=dict(),
+ db_parameter_group_name=dict(),
+ db_security_groups=dict(type='list', elements='str'),
+ db_snapshot_identifier=dict(),
+ db_subnet_group_name=dict(aliases=['subnet_group']),
+ domain=dict(),
+ domain_iam_role_name=dict(),
+ enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'),
+ enable_iam_database_authentication=dict(type='bool'),
+ enable_performance_insights=dict(type='bool'),
+ engine=dict(),
+ engine_version=dict(),
+ final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
+ force_failover=dict(type='bool'),
+ iops=dict(type='int'),
+ kms_key_id=dict(),
+ license_model=dict(),
+ master_user_password=dict(aliases=['password'], no_log=True),
+ master_username=dict(aliases=['username']),
+ max_allocated_storage=dict(type='int'),
+ monitoring_interval=dict(type='int'),
+ monitoring_role_arn=dict(),
+ multi_az=dict(type='bool'),
+ new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
+ option_group_name=dict(),
+ performance_insights_kms_key_id=dict(),
+ performance_insights_retention_period=dict(type='int'),
+ port=dict(type='int'),
+ preferred_backup_window=dict(aliases=['backup_window']),
+ preferred_maintenance_window=dict(aliases=['maintenance_window']),
+ processor_features=dict(type='dict'),
+ promotion_tier=dict(),
+ publicly_accessible=dict(type='bool'),
+ restore_time=dict(),
+ s3_bucket_name=dict(),
+ s3_ingestion_role_arn=dict(),
+ s3_prefix=dict(),
+ skip_final_snapshot=dict(type='bool', default=False),
+ snapshot_identifier=dict(),
+ source_db_instance_identifier=dict(),
+ source_engine=dict(choices=['mysql']),
+ source_engine_version=dict(),
+ source_region=dict(),
+ storage_encrypted=dict(type='bool'),
+ storage_type=dict(choices=['standard', 'gp2', 'io1']),
+ tags=dict(type='dict'),
+ tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
+ tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
+ timezone=dict(),
+ use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
+ vpc_security_group_ids=dict(type='list', elements='str')
+ )
+ arg_spec.update(parameter_options)
+
+ required_if = [
+ ('engine', 'aurora', ('db_cluster_identifier',)),
+ ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
+        ('engine', 'aurora-postgresql', ('db_cluster_identifier',)),
+ ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
+ ('creation_source', 's3', (
+ 's3_bucket_name', 'engine', 'master_username', 'master_user_password',
+ 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+ ]
+ mutually_exclusive = [
+ ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'),
+ ('use_latest_restorable_time', 'restore_time'),
+ ('availability_zone', 'multi_az'),
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True
+ )
+
+ if not module.boto3_at_least('1.5.0'):
+        module.fail_json(msg="rds_instance requires boto3 >= 1.5.0")
+
+ # Sanitize instance identifiers
+ module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
+ if module.params['new_db_instance_identifier']:
+ module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
+
+ # Sanitize processor features
+ if module.params['processor_features'] is not None:
+ module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
+
+ client = module.client('rds')
+ changed = False
+ state = module.params['state']
+ instance_id = module.params['db_instance_identifier']
+ instance = get_instance(client, module, instance_id)
+ validate_options(client, module, instance)
+ method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
+
+ if method_name:
+ raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+ parameters = get_parameters(client, module, raw_parameters, method_name)
+
+ if parameters:
+ result, changed = call_method(client, module, method_name, parameters)
+
+ instance_id = get_final_identifier(method_name, module)
+
+ # Check tagging/promoting/rebooting/starting/stopping instance
+ if state != 'absent' and (not module.check_mode or instance):
+ changed |= update_instance(client, module, instance, instance_id)
+
+ if changed:
+ instance = get_instance(client, module, instance_id)
+ if state != 'absent' and (instance or not module.check_mode):
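+        # After a create or rename the instance may take a few seconds to become
+        # visible; poll up to ten times before returning whatever we have.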
+ for attempt_to_wait in range(0, 10):
+ instance = get_instance(client, module, instance_id)
+ if instance:
+ break
+ else:
+ sleep(5)
+
+ if state == 'absent' and changed and not module.params['skip_final_snapshot']:
+ instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
+
+ pending_processor_features = None
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
+ instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
+ if pending_processor_features is not None:
+ instance['pending_modified_values']['processor_features'] = pending_processor_features
+
+ module.exit_json(changed=changed, **instance)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_facts.py
new file mode 100644
index 00000000..cccd2b3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_facts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018 Michael De La Rue
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_instance_info
+version_added: 1.0.0
+short_description: obtain information about one or more RDS instances
+description:
+ - Obtain information about one or more RDS instances.
+ - This module was called C(rds_instance_facts) before Ansible 2.9. The usage did not change.
+options:
+ db_instance_identifier:
+ description:
+ - The RDS instance's unique identifier.
+ required: false
+ aliases:
+ - id
+ type: str
+ filters:
+ description:
+ - A filter that specifies one or more DB instances to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
+ type: dict
+requirements:
+ - "python >= 2.7"
+ - "boto3"
+author:
+ - "Will Thames (@willthames)"
+ - "Michael De La Rue (@mikedlr)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get information about an instance
+ community.aws.rds_instance_info:
+ db_instance_identifier: new-database
+ register: new_database_info
+
+- name: Get all RDS instances
+ community.aws.rds_instance_info:
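+
+# A sketch of server-side filtering; db-instance-id is one of the filter names accepted
+# by the DescribeDBInstances API.
+- name: Get instances matching a DB instance identifier filter
+  community.aws.rds_instance_info:
+    filters:
+      db-instance-id: new-database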
+'''
+
+RETURN = '''
+instances:
+ description: List of RDS instances
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: Gigabytes of storage allocated to the database
+ returned: always
+ type: int
+ sample: 10
+ auto_minor_version_upgrade:
+ description: Whether minor version upgrades happen automatically
+ returned: always
+ type: bool
+ sample: true
+ availability_zone:
+ description: Availability Zone in which the database resides
+ returned: always
+ type: str
+ sample: us-west-2b
+ backup_retention_period:
+ description: Days for which backups are retained
+ returned: always
+ type: int
+ sample: 7
+ ca_certificate_identifier:
+ description: ID for the CA certificate
+ returned: always
+ type: str
+ sample: rds-ca-2015
+ copy_tags_to_snapshot:
+ description: Whether DB tags should be copied to the snapshot
+ returned: always
+ type: bool
+ sample: false
+ db_instance_arn:
+ description: ARN of the database instance
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds
+ db_instance_class:
+ description: Instance class of the database instance
+ returned: always
+ type: str
+ sample: db.t2.small
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: helloworld-rds
+ db_instance_port:
+ description: Port used by the database instance
+ returned: always
+ type: int
+ sample: 0
+ db_instance_status:
+ description: Status of the database instance
+ returned: always
+ type: str
+ sample: available
+ db_name:
+ description: Name of the database
+ returned: always
+ type: str
+ sample: management
+ db_parameter_groups:
+ description: List of database parameter groups
+ returned: always
+ type: complex
+ contains:
+ db_parameter_group_name:
+ description: Name of the database parameter group
+ returned: always
+ type: str
+ sample: psql-pg-helloworld
+ parameter_apply_status:
+ description: Whether the parameter group has been applied
+ returned: always
+ type: str
+ sample: in-sync
+ db_security_groups:
+ description: List of security groups used by the database instance
+ returned: always
+ type: list
+ sample: []
+ db_subnet_group:
+      description: The subnet group used by the database instance
+ returned: always
+ type: complex
+ contains:
+ db_subnet_group_description:
+ description: Description of the DB subnet group
+ returned: always
+ type: str
+ sample: My database subnet group
+ db_subnet_group_name:
+ description: Name of the database subnet group
+ returned: always
+ type: str
+ sample: my-subnet-group
+ subnet_group_status:
+ description: Subnet group status
+ returned: always
+ type: str
+ sample: Complete
+ subnets:
+ description: List of subnets in the subnet group
+ returned: always
+ type: complex
+ contains:
+ subnet_availability_zone:
+ description: Availability zone of the subnet
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: Name of the availability zone
+ returned: always
+ type: str
+ sample: us-west-2c
+ subnet_identifier:
+ description: Subnet ID
+ returned: always
+ type: str
+ sample: subnet-abcd1234
+ subnet_status:
+ description: Subnet status
+ returned: always
+ type: str
+ sample: Active
+ vpc_id:
+ description: VPC id of the subnet group
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+ dbi_resource_id:
+ description: AWS Region-unique, immutable identifier for the DB instance
+ returned: always
+ type: str
+ sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
+ domain_memberships:
+ description: List of domain memberships
+ returned: always
+ type: list
+ sample: []
+ endpoint:
+ description: Database endpoint
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: Database endpoint address
+ returned: always
+ type: str
+ sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
+ hosted_zone_id:
+ description: Route53 hosted zone ID
+ returned: always
+ type: str
+ sample: Z1PABCD0000000
+ port:
+ description: Database endpoint port
+ returned: always
+ type: int
+ sample: 5432
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Date and time the instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ kms_key_id:
+ description: KMS Key ID
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab
+ latest_restorable_time:
+ description: Latest time to which a database can be restored with point-in-time restore
+ returned: always
+ type: str
+ sample: '2018-05-17T00:03:56+00:00'
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ monitoring_interval:
+ description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
+ returned: always
+ type: int
+ sample: 0
+ multi_az:
+ description: Whether Multi-AZ is on
+ returned: always
+ type: bool
+ sample: false
+ option_group_memberships:
+ description: List of option groups
+ returned: always
+ type: complex
+ contains:
+ option_group_name:
+ description: Option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ status:
+ description: Status of option group
+ returned: always
+ type: str
+ sample: in-sync
+ pending_modified_values:
+ description: Modified values pending application
+ returned: always
+ type: complex
+ contains: {}
+ performance_insights_enabled:
+ description: Whether performance insights are enabled
+ returned: always
+ type: bool
+ sample: false
+ preferred_backup_window:
+ description: Preferred backup window
+ returned: always
+ type: str
+ sample: 04:00-05:00
+ preferred_maintenance_window:
+ description: Preferred maintenance window
+ returned: always
+ type: str
+ sample: mon:05:00-mon:05:30
+ publicly_accessible:
+ description: Whether the DB is publicly accessible
+ returned: always
+ type: bool
+ sample: false
+ read_replica_db_instance_identifiers:
+ description: List of database instance read replicas
+ returned: always
+ type: list
+ sample: []
+ storage_encrypted:
+ description: Whether the storage is encrypted
+ returned: always
+ type: bool
+ sample: true
+ storage_type:
+ description: Storage type of the Database instance
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Tags used by the database instance
+ returned: always
+ type: complex
+ contains: {}
+ vpc_security_groups:
+ description: List of VPC security groups
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: Status of the VPC security group
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: VPC Security Group ID
+ returned: always
+ type: str
+ sample: sg-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ )
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def instance_info(module, conn):
+ instance_name = module.params.get('db_instance_identifier')
+ filters = module.params.get('filters')
+
+ params = dict()
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if filters:
+ params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+
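+    # describe_db_instances is paginated; build_full_result gathers every page into one result.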
+ paginator = conn.get_paginator('describe_db_instances')
+ try:
+ results = paginator.paginate(**params).build_full_result()['DBInstances']
+ except is_boto3_error_code('DBInstanceNotFound'):
+ results = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Couldn't get instance information")
+
+ for instance in results:
+ try:
+ instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
+
+ return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
+
+
+def main():
+ argument_spec = dict(
+ db_instance_identifier=dict(aliases=['id']),
+ filters=dict(type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if module._name == 'rds_instance_facts':
+ module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", date='2021-12-01', collection_name='community.aws')
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ module.exit_json(**instance_info(module, conn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_info.py
new file mode 100644
index 00000000..cccd2b3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_instance_info.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018 Michael De La Rue
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_instance_info
+version_added: 1.0.0
+short_description: obtain information about one or more RDS instances
+description:
+ - Obtain information about one or more RDS instances.
+ - This module was called C(rds_instance_facts) before Ansible 2.9. The usage did not change.
+options:
+ db_instance_identifier:
+ description:
+ - The RDS instance's unique identifier.
+ required: false
+ aliases:
+ - id
+ type: str
+ filters:
+ description:
+ - A filter that specifies one or more DB instances to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
+ type: dict
+requirements:
+ - "python >= 2.7"
+ - "boto3"
+author:
+ - "Will Thames (@willthames)"
+ - "Michael De La Rue (@mikedlr)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get information about an instance
+ community.aws.rds_instance_info:
+ db_instance_identifier: new-database
+ register: new_database_info
+
+- name: Get all RDS instances
+ community.aws.rds_instance_info:
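+
+# A sketch of server-side filtering; db-instance-id is one of the filter names accepted
+# by the DescribeDBInstances API.
+- name: Get instances matching a DB instance identifier filter
+  community.aws.rds_instance_info:
+    filters:
+      db-instance-id: new-database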
+'''
+
+RETURN = '''
+instances:
+ description: List of RDS instances
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: Gigabytes of storage allocated to the database
+ returned: always
+ type: int
+ sample: 10
+ auto_minor_version_upgrade:
+ description: Whether minor version upgrades happen automatically
+ returned: always
+ type: bool
+ sample: true
+ availability_zone:
+ description: Availability Zone in which the database resides
+ returned: always
+ type: str
+ sample: us-west-2b
+ backup_retention_period:
+ description: Days for which backups are retained
+ returned: always
+ type: int
+ sample: 7
+ ca_certificate_identifier:
+ description: ID for the CA certificate
+ returned: always
+ type: str
+ sample: rds-ca-2015
+ copy_tags_to_snapshot:
+ description: Whether DB tags should be copied to the snapshot
+ returned: always
+ type: bool
+ sample: false
+ db_instance_arn:
+ description: ARN of the database instance
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds
+ db_instance_class:
+ description: Instance class of the database instance
+ returned: always
+ type: str
+ sample: db.t2.small
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: helloworld-rds
+ db_instance_port:
+ description: Port used by the database instance
+ returned: always
+ type: int
+ sample: 0
+ db_instance_status:
+ description: Status of the database instance
+ returned: always
+ type: str
+ sample: available
+ db_name:
+ description: Name of the database
+ returned: always
+ type: str
+ sample: management
+ db_parameter_groups:
+ description: List of database parameter groups
+ returned: always
+ type: complex
+ contains:
+ db_parameter_group_name:
+ description: Name of the database parameter group
+ returned: always
+ type: str
+ sample: psql-pg-helloworld
+ parameter_apply_status:
+ description: Whether the parameter group has been applied
+ returned: always
+ type: str
+ sample: in-sync
+ db_security_groups:
+ description: List of security groups used by the database instance
+ returned: always
+ type: list
+ sample: []
+ db_subnet_group:
+      description: The subnet group used by the database instance
+ returned: always
+ type: complex
+ contains:
+ db_subnet_group_description:
+ description: Description of the DB subnet group
+ returned: always
+ type: str
+ sample: My database subnet group
+ db_subnet_group_name:
+ description: Name of the database subnet group
+ returned: always
+ type: str
+ sample: my-subnet-group
+ subnet_group_status:
+ description: Subnet group status
+ returned: always
+ type: str
+ sample: Complete
+ subnets:
+ description: List of subnets in the subnet group
+ returned: always
+ type: complex
+ contains:
+ subnet_availability_zone:
+ description: Availability zone of the subnet
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: Name of the availability zone
+ returned: always
+ type: str
+ sample: us-west-2c
+ subnet_identifier:
+ description: Subnet ID
+ returned: always
+ type: str
+ sample: subnet-abcd1234
+ subnet_status:
+ description: Subnet status
+ returned: always
+ type: str
+ sample: Active
+ vpc_id:
+ description: VPC id of the subnet group
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+ dbi_resource_id:
+ description: AWS Region-unique, immutable identifier for the DB instance
+ returned: always
+ type: str
+ sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
+ domain_memberships:
+ description: List of domain memberships
+ returned: always
+ type: list
+ sample: []
+ endpoint:
+ description: Database endpoint
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: Database endpoint address
+ returned: always
+ type: str
+ sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
+ hosted_zone_id:
+ description: Route53 hosted zone ID
+ returned: always
+ type: str
+ sample: Z1PABCD0000000
+ port:
+ description: Database endpoint port
+ returned: always
+ type: int
+ sample: 5432
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Date and time the instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ kms_key_id:
+ description: KMS Key ID
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab
+ latest_restorable_time:
+ description: Latest time to which a database can be restored with point-in-time restore
+ returned: always
+ type: str
+ sample: '2018-05-17T00:03:56+00:00'
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ monitoring_interval:
+ description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
+ returned: always
+ type: int
+ sample: 0
+ multi_az:
+ description: Whether Multi-AZ is on
+ returned: always
+ type: bool
+ sample: false
+ option_group_memberships:
+ description: List of option groups
+ returned: always
+ type: complex
+ contains:
+ option_group_name:
+ description: Option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ status:
+ description: Status of option group
+ returned: always
+ type: str
+ sample: in-sync
+ pending_modified_values:
+ description: Modified values pending application
+ returned: always
+ type: complex
+ contains: {}
+ performance_insights_enabled:
+ description: Whether performance insights are enabled
+ returned: always
+ type: bool
+ sample: false
+ preferred_backup_window:
+ description: Preferred backup window
+ returned: always
+ type: str
+ sample: 04:00-05:00
+ preferred_maintenance_window:
+ description: Preferred maintenance window
+ returned: always
+ type: str
+ sample: mon:05:00-mon:05:30
+ publicly_accessible:
+ description: Whether the DB is publicly accessible
+ returned: always
+ type: bool
+ sample: false
+ read_replica_db_instance_identifiers:
+ description: List of database instance read replicas
+ returned: always
+ type: list
+ sample: []
+ storage_encrypted:
+ description: Whether the storage is encrypted
+ returned: always
+ type: bool
+ sample: true
+ storage_type:
+ description: Storage type of the Database instance
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Tags used by the database instance
+ returned: always
+ type: complex
+ contains: {}
+ vpc_security_groups:
+ description: List of VPC security groups
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: Status of the VPC security group
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: VPC Security Group ID
+ returned: always
+ type: str
+ sample: sg-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ )
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def instance_info(module, conn):
+ instance_name = module.params.get('db_instance_identifier')
+ filters = module.params.get('filters')
+
+ params = dict()
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if filters:
+ params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+
+ paginator = conn.get_paginator('describe_db_instances')
+ try:
+ results = paginator.paginate(**params).build_full_result()['DBInstances']
+ except is_boto3_error_code('DBInstanceNotFound'):
+ results = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Couldn't get instance information")
+
+ for instance in results:
+ try:
+ instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
+
+ return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
+
+
+def main():
+ argument_spec = dict(
+ db_instance_identifier=dict(aliases=['id']),
+ filters=dict(type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if module._name == 'rds_instance_facts':
+ module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", date='2021-12-01', collection_name='community.aws')
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ module.exit_json(**instance_info(module, conn))
+
+
+if __name__ == '__main__':
+ main()
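
For reference, the describe/paginate/tag pattern used by instance_info() above can be exercised standalone. A minimal sketch, assuming configured boto3 credentials; the region and identifier are illustrative, not taken from the module:

    import boto3

    rds = boto3.client('rds', region_name='us-west-2')
    # build_full_result() merges every page of the paginated API call
    instances = rds.get_paginator('describe_db_instances').paginate(
        DBInstanceIdentifier='helloworld-rds',
    ).build_full_result()['DBInstances']
    for instance in instances:
        # same per-instance tag lookup the module performs
        tags = rds.list_tags_for_resource(
            ResourceName=instance['DBInstanceArn'])['TagList']
        print(instance['DBInstanceIdentifier'], tags)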
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_param_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_param_group.py
new file mode 100644
index 00000000..53669847
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_param_group.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_param_group
+version_added: 1.0.0
+short_description: manage RDS parameter groups
+description:
+ - Creates, modifies, and deletes RDS parameter groups.
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - Specifies whether the group should be present or absent.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ name:
+ description:
+ - Database parameter group identifier.
+ required: true
+ type: str
+ description:
+ description:
+ - Database parameter group description. Only set when a new group is added.
+ type: str
+ engine:
+ description:
+ - The type of database for this group.
+ - Please use the following command to get a list of all supported DB engines and their respective versions.
+ - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+ - Required for I(state=present).
+ type: str
+ immediate:
+ description:
+ - Whether to apply the changes immediately, or after the next reboot of any associated instances.
+ aliases:
+ - apply_immediately
+ type: bool
+ params:
+ description:
+ - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
+ or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
+ aliases: [parameters]
+ type: dict
+ tags:
+ description:
+ - Dictionary of tags to attach to the parameter group.
+ type: dict
+ purge_tags:
+ description:
+ - Whether or not to remove tags that do not appear in the C(tags) list.
+ type: bool
+ default: False
+author:
+ - "Scott Anderson (@tastychutney)"
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
+ community.aws.rds_param_group:
+ state: present
+ name: norwegian-blue
+ description: 'My Fancy Ex Parrot Group'
+ engine: 'mysql5.6'
+ params:
+ auto_increment_increment: "42K"
+ tags:
+ Environment: production
+ Application: parrot
+
+- name: Remove a parameter group
+ community.aws.rds_param_group:
+ state: absent
+ name: norwegian-blue
+'''
+
+RETURN = '''
+db_parameter_group_name:
+ description: Name of DB parameter group
+ type: str
+ returned: when state is present
+db_parameter_group_family:
+ description: DB parameter group family that this DB parameter group is compatible with.
+ type: str
+ returned: when state is present
+db_parameter_group_arn:
+ description: ARN of the DB parameter group
+ type: str
+ returned: when state is present
+description:
+ description: description of the DB parameter group
+ type: str
+ returned: when state is present
+errors:
+ description: list of errors from attempting to modify parameters that are not modifiable
+ type: list
+ returned: when state is present
+tags:
+ description: dictionary of tags
+ type: dict
+ returned: when state is present
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+INT_MODIFIERS = {
+ 'K': 1024,
+ 'M': pow(1024, 2),
+ 'G': pow(1024, 3),
+ 'T': pow(1024, 4),
+}
+
+
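+# Illustrative expansions performed by convert_parameter() below:
+#   "42K" -> 42 * 1024    = 43008
+#   "10M" -> 10 * 1024**2 = 10485760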
+def convert_parameter(param, value):
+ """
+ Allows setting parameters with 10M = 10 * 1024 * 1024 and so on.
+ """
+ converted_value = value
+
+ if param['DataType'] == 'integer':
+ if isinstance(value, string_types):
+ try:
+ for modifier in INT_MODIFIERS.keys():
+ if value.endswith(modifier):
+ converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
+ except ValueError:
+ # may be based on a variable (i.e. {foo*3/4}) so
+ # just pass it on through to boto
+ pass
+ elif isinstance(value, bool):
+ converted_value = 1 if value else 0
+
+ elif param['DataType'] == 'boolean':
+ if isinstance(value, string_types):
+ converted_value = to_native(value) in BOOLEANS_TRUE
+ # convert True/False to 1/0
+ converted_value = 1 if converted_value else 0
+ return str(converted_value)
+
+
+def update_parameters(module, connection):
+ groupname = module.params['name']
+ desired = module.params['params']
+ apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot'
+ errors = []
+ modify_list = []
+ parameters_paginator = connection.get_paginator('describe_db_parameters')
+ existing = parameters_paginator.paginate(DBParameterGroupName=groupname).build_full_result()['Parameters']
+ lookup = dict((param['ParameterName'], param) for param in existing)
+ for param_key, param_value in desired.items():
+ if param_key not in lookup:
+ errors.append("Parameter %s is not an available parameter for the %s engine" %
+ (param_key, module.params.get('engine')))
+ else:
+ converted_value = convert_parameter(lookup[param_key], param_value)
+ # engine-default parameters do not have a ParameterValue, so we'll always override those.
+ if converted_value != lookup[param_key].get('ParameterValue'):
+ if lookup[param_key]['IsModifiable']:
+ modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method))
+ else:
+ errors.append("Parameter %s is not modifiable" % param_key)
+
+ # modify_db_parameters takes at most 20 parameters
+ if modify_list:
+ try:
+ from itertools import izip_longest as zip_longest # python 2
+ except ImportError:
+ from itertools import zip_longest # python 3
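+ # zip_longest over 20 references to the same iterator yields successive
+ # 20-item chunks, padding the final chunk with None (filtered out below)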
+ for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
+ non_empty_slice = [item for item in modify_slice if item]
+ try:
+ connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't update parameters: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ return True, errors
+ return False, errors
+
+
+def update_tags(module, connection, group, tags):
+ changed = False
+ existing_tags = connection.list_tags_for_resource(ResourceName=group['DBParameterGroupArn'])['TagList']
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
+ tags, module.params['purge_tags'])
+ if to_update:
+ try:
+ connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'],
+ Tags=ansible_dict_to_boto3_tag_list(to_update))
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't add tags to parameter group: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.ParamValidationError as e:
+ # Usually a tag value has been passed as an int or bool, needs to be a string
+ # The AWS exception message is reasonably ok for this purpose
+ module.fail_json(msg="Couldn't add tags to parameter group: %s." % str(e),
+ exception=traceback.format_exc())
+ if to_delete:
+ try:
+ connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'],
+ TagKeys=to_delete)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't remove tags from parameter group: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ return changed
+
+
+def ensure_present(module, connection):
+ groupname = module.params['name']
+ tags = module.params.get('tags')
+ changed = False
+ errors = []
+ try:
+ response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
+ response = None
+ else:
+ module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ if not response:
+ params = dict(DBParameterGroupName=groupname,
+ DBParameterGroupFamily=module.params['engine'],
+ Description=module.params['description'])
+ if tags:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+ try:
+ response = connection.create_db_parameter_group(**params)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't create parameter group: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ else:
+ group = response['DBParameterGroups'][0]
+ if tags:
+ changed = update_tags(module, connection, group, tags)
+
+ if module.params.get('params'):
+ params_changed, errors = update_parameters(module, connection)
+ changed = changed or params_changed
+
+ try:
+ response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
+ group = camel_dict_to_snake_dict(response['DBParameterGroups'][0])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't obtain parameter group information: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ try:
+ tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList']
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't obtain parameter group tags: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ group['tags'] = boto3_tag_list_to_ansible_dict(tags)
+
+ module.exit_json(changed=changed, errors=errors, **group)
+
+
+def ensure_absent(module, connection):
+ group = module.params['name']
+ try:
+ response = connection.describe_db_parameter_groups(DBParameterGroupName=group)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ try:
+ response = connection.delete_db_parameter_group(DBParameterGroupName=group)
+ module.exit_json(changed=True)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't delete parameter group: %s" % str(e),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ engine=dict(),
+ description=dict(),
+ params=dict(aliases=['parameters'], type='dict'),
+ immediate=dict(type='bool', aliases=['apply_immediately']),
+ tags=dict(type='dict', default={}),
+ purge_tags=dict(type='bool', default=False),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['description', 'engine']]],
+ )
+
+ try:
+ conn = module.client('rds')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ if state == 'present':
+ ensure_present(module, conn)
+ if state == 'absent':
+ ensure_absent(module, conn)
+
+
+if __name__ == '__main__':
+ main()
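
The tag reconciliation in update_tags() is driven by compare_aws_tags(). A minimal sketch of its semantics (a pure function, no AWS calls; the tag values are illustrative):

    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags

    existing = {'Environment': 'staging', 'Owner': 'alice'}
    desired = {'Environment': 'production'}
    # With purge_tags=True, keys absent from the desired set are scheduled
    # for removal alongside any changed values
    to_update, to_delete = compare_aws_tags(existing, desired, purge_tags=True)
    assert to_update == {'Environment': 'production'}
    assert to_delete == ['Owner']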
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot.py
new file mode 100644
index 00000000..dd9f5028
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# Copyright (c) 2014 Ansible Project
+# Copyright (c) 2017, 2018, 2019 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_snapshot
+version_added: 1.0.0
+short_description: manage Amazon RDS snapshots
+description:
+ - Creates or deletes RDS snapshots.
+options:
+ state:
+ description:
+ - Specify the desired state of the snapshot.
+ default: present
+ choices: [ 'present', 'absent']
+ type: str
+ db_snapshot_identifier:
+ description:
+ - The snapshot to manage.
+ required: true
+ aliases:
+ - id
+ - snapshot_id
+ type: str
+ db_instance_identifier:
+ description:
+ - Database instance identifier. Required when state is present.
+ aliases:
+ - instance_id
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for snapshot creation or deletion.
+ type: bool
+ default: 'no'
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ tags:
+ description:
+ - Dictionary of tags to apply to the snapshot.
+ type: dict
+ purge_tags:
+ description:
+ - Whether to remove tags not present in the C(tags) parameter.
+ default: True
+ type: bool
+requirements:
+ - "python >= 2.6"
+ - "boto3"
+author:
+ - "Will Thames (@willthames)"
+ - "Michael De La Rue (@mikedlr)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create snapshot
+ community.aws.rds_snapshot:
+ db_instance_identifier: new-database
+ db_snapshot_identifier: new-database-snapshot
+
+- name: Delete snapshot
+ community.aws.rds_snapshot:
+ db_snapshot_identifier: new-database-snapshot
+ state: absent
+'''
+
+RETURN = '''
+allocated_storage:
+ description: How much storage is allocated in GB.
+ returned: always
+ type: int
+ sample: 20
+availability_zone:
+ description: Availability zone of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: us-west-2a
+db_instance_identifier:
+ description: Database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: ansible-test-16638696
+db_snapshot_arn:
+ description: Amazon Resource Name for the snapshot.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot
+db_snapshot_identifier:
+ description: Name of the snapshot.
+ returned: always
+ type: str
+ sample: ansible-test-16638696-test-snapshot
+dbi_resource_id:
+ description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region.
+ returned: always
+ type: str
+ sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U
+encrypted:
+ description: Whether the snapshot is encrypted.
+ returned: always
+ type: bool
+ sample: false
+engine:
+ description: Engine of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: mariadb
+engine_version:
+ description: Version of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: 10.2.21
+iam_database_authentication_enabled:
+ description: Whether IAM database authentication is enabled.
+ returned: always
+ type: bool
+ sample: false
+instance_create_time:
+ description: Creation time of the instance from which the snapshot was created.
+ returned: always
+ type: str
+ sample: '2019-06-15T10:15:56.221000+00:00'
+license_model:
+ description: License model of the database.
+ returned: always
+ type: str
+ sample: general-public-license
+master_username:
+ description: Master username of the database.
+ returned: always
+ type: str
+ sample: test
+option_group_name:
+ description: Option group of the database.
+ returned: always
+ type: str
+ sample: default:mariadb-10-2
+percent_progress:
+ description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot.
+ returned: always
+ type: int
+ sample: 100
+port:
+ description: Port on which the database is listening.
+ returned: always
+ type: int
+ sample: 3306
+processor_features:
+ description: List of processor features of the database.
+ returned: always
+ type: list
+ sample: []
+snapshot_create_time:
+ description: Creation time of the snapshot.
+ returned: always
+ type: str
+ sample: '2019-06-15T10:46:23.776000+00:00'
+snapshot_type:
+ description: How the snapshot was created (always C(manual) for this module).
+ returned: always
+ type: str
+ sample: manual
+status:
+ description: Status of the snapshot.
+ returned: always
+ type: str
+ sample: available
+storage_type:
+ description: Storage type of the database.
+ returned: always
+ type: str
+ sample: gp2
+tags:
+ description: Tags applied to the snapshot.
+ returned: always
+ type: complex
+ contains: {}
+vpc_id:
+ description: ID of the VPC in which the DB lives.
+ returned: always
+ type: str
+ sample: vpc-09ff232e222710ae0
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+
+
+def get_snapshot(client, module, snapshot_id):
+ try:
+ response = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)
+ except client.exceptions.DBSnapshotNotFoundFault:
+ return None
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
+ return response['DBSnapshots'][0]
+
+
+def snapshot_to_facts(client, module, snapshot):
+ try:
+ snapshot['Tags'] = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['DBSnapshotIdentifier'])
+ except KeyError:
+ module.fail_json(msg=str(snapshot))
+
+ return camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])
+
+
+def wait_for_snapshot_status(client, module, db_snapshot_id, waiter_name):
+ if not module.params['wait']:
+ return
+ timeout = module.params['wait_timeout']
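+ # Poll every 5 seconds; adding 2.5 before dividing rounds the timeout to
+ # the nearest whole attempt instead of truncating it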
+ try:
+ client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id,
+ WaiterConfig=dict(
+ Delay=5,
+ MaxAttempts=int((timeout + 2.5) / 5)
+ ))
+ except botocore.exceptions.WaiterError as e:
+ if waiter_name == 'db_snapshot_deleted':
+ msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id)
+ else:
+ msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id)
+ module.fail_json_aws(e, msg=msg)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_snapshot_id))
+
+
+def ensure_snapshot_absent(client, module):
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ changed = False
+
+ snapshot = get_snapshot(client, module, snapshot_name)
+ if snapshot and snapshot['Status'] != 'deleting':
+ try:
+ client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="trying to delete snapshot")
+
+ # If we're not waiting for a delete to complete then we're all done
+ # so just return
+ if not snapshot or not module.params.get('wait'):
+ return dict(changed=changed)
+ try:
+ wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_deleted')
+ return dict(changed=changed)
+ except client.exceptions.DBSnapshotNotFoundFault:
+ return dict(changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "awaiting snapshot deletion")
+
+
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
+ if tags is None:
+ return False
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
+ changed = bool(tags_to_add or tags_to_remove)
+ if tags_to_add:
+ try:
+ client.add_tags_to_resource(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't add tags to snapshot {0}".format(resource_arn))
+ if tags_to_remove:
+ try:
+ client.remove_tags_from_resource(ResourceName=resource_arn, TagKeys=tags_to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't remove tags from snapshot {0}".format(resource_arn))
+ return changed
+
+
+def ensure_snapshot_present(client, module):
+ db_instance_identifier = module.params.get('db_instance_identifier')
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ changed = False
+ snapshot = get_snapshot(client, module, snapshot_name)
+ if not snapshot:
+ try:
+ snapshot = client.create_db_snapshot(DBSnapshotIdentifier=snapshot_name,
+ DBInstanceIdentifier=db_instance_identifier)['DBSnapshot']
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="trying to create db snapshot")
+
+ if module.params.get('wait'):
+ wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_available')
+
+ existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'],
+ aws_retry=True)['TagList'])
+ desired_tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+ changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], existing_tags, desired_tags, purge_tags)
+
+ snapshot = get_snapshot(client, module, snapshot_name)
+
+ return dict(changed=changed, **snapshot_to_facts(client, module, snapshot))
+
+
+def main():
+
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True),
+ db_instance_identifier=dict(aliases=['instance_id']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ ),
+ required_if=[['state', 'present', ['db_instance_identifier']]]
+ )
+
+ client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['DBSnapshotNotFound']))
+
+ if module.params['state'] == 'absent':
+ ret_dict = ensure_snapshot_absent(client, module)
+ else:
+ ret_dict = ensure_snapshot_present(client, module)
+
+ module.exit_json(**ret_dict)
+
+
+if __name__ == '__main__':
+ main()
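
The module delegates all polling to boto3's built-in RDS waiters. A minimal standalone sketch of the same pattern (the identifier and timeout are illustrative):

    import boto3

    rds = boto3.client('rds')
    timeout = 300
    # db_snapshot_available polls describe_db_snapshots until the snapshot
    # reaches the 'available' state or MaxAttempts is exhausted
    rds.get_waiter('db_snapshot_available').wait(
        DBSnapshotIdentifier='new-database-snapshot',
        WaiterConfig=dict(Delay=5, MaxAttempts=int((timeout + 2.5) / 5)),
    )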
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_facts.py
new file mode 100644
index 00000000..63a5e47b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_facts.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# Copyright (c) 2014-2017 Ansible Project
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_snapshot_info
+version_added: 1.0.0
+short_description: obtain information about one or more RDS snapshots
+description:
+ - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
+ - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
+ - This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
+options:
+ db_snapshot_identifier:
+ description:
+ - Name of an RDS (unclustered) snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ aliases:
+ - snapshot_name
+ type: str
+ db_instance_identifier:
+ description:
+ - RDS instance name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_identifier:
+ description:
+ - RDS cluster name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_snapshot_identifier:
+ description:
+ - Name of an RDS cluster snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
+ required: false
+ type: str
+ snapshot_type:
+ description:
+ - Type of snapshot to find.
+ - By default both automated and manual snapshots will be returned.
+ required: false
+ choices: ['automated', 'manual', 'shared', 'public']
+ type: str
+requirements:
+ - "python >= 2.6"
+ - "boto3"
+author:
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get information about a snapshot
+ community.aws.rds_snapshot_info:
+ db_snapshot_identifier: snapshot_name
+ register: new_database_info
+
+- name: Get all RDS snapshots for an RDS instance
+ community.aws.rds_snapshot_info:
+ db_instance_identifier: helloworld-rds-master
+'''
+
+RETURN = '''
+snapshots:
+ description: List of non-clustered snapshots
+ returned: When cluster parameters are not passed
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 10
+ availability_zone:
+ description: The availability zone of the database from which the snapshot was taken
+ returned: always
+ type: str
+ sample: us-west-2b
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: hello-world-rds
+ db_snapshot_arn:
+ description: Snapshot ARN
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
+ db_snapshot_identifier:
+ description: Snapshot name
+ returned: always
+ type: str
+ sample: rds:hello-world-rds-us1-2018-05-16-04-03
+ encrypted:
+ description: Whether the snapshot was encrypted
+ returned: always
+ type: bool
+ sample: true
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Time the Instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ option_group_name:
+ description: Database option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 100
+ snapshot_create_time:
+ description: Time snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-16T04:03:33.871000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: automated
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: available
+ storage_type:
+ description: Storage type of underlying DB
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Snapshot tags
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: ID of VPC containing the DB
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+cluster_snapshots:
+ description: List of cluster snapshots
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 1
+ availability_zones:
+ description: The availability zones of the database from which the snapshot was taken
+ returned: always
+ type: list
+ sample:
+ - ca-central-1a
+ - ca-central-1b
+ cluster_create_time:
+ description: Date and time the cluster was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:13:40.223000+00:00'
+ db_cluster_identifier:
+ description: Database cluster identifier
+ returned: always
+ type: str
+ sample: test-aurora-cluster
+ db_cluster_snapshot_arn:
+ description: ARN of the database snapshot
+ returned: always
+ type: str
+ sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
+ db_cluster_snapshot_identifier:
+ description: Snapshot identifier
+ returned: always
+ type: str
+ sample: test-aurora-snapshot
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: aurora
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 5.6.10a
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: aurora
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: shertel
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 0
+ port:
+ description: Database port
+ returned: always
+ type: int
+ sample: 0
+ snapshot_create_time:
+ description: Date and time when the snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:23:23.731000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: manual
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: creating
+ storage_encrypted:
+ description: Whether the snapshot is encrypted
+ returned: always
+ type: bool
+ sample: true
+ tags:
+ description: Tags of the snapshot
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: VPC of the database
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def common_snapshot_info(module, conn, method, prefix, params):
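+ # 'prefix' is the CamelCase key stem ('DBSnapshot' or 'DBClusterSnapshot');
+ # the paginated results live under its plural form and the matching
+ # not-found error code is '<prefix>NotFound'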
+ paginator = conn.get_paginator(method)
+ try:
+ results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
+ except is_boto3_error_code('%sNotFound' % prefix):
+ results = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "trying to get snapshot information")
+
+ for snapshot in results:
+ try:
+ if snapshot['SnapshotType'] != 'shared':
+ snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
+
+ return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
+
+
+def cluster_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_cluster_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_cluster_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBClusterSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBClusterIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
+
+
+def standalone_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_instance_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
+
+
+def main():
+ argument_spec = dict(
+ db_snapshot_identifier=dict(aliases=['snapshot_name']),
+ db_instance_identifier=dict(),
+ db_cluster_identifier=dict(),
+ db_cluster_snapshot_identifier=dict(),
+ snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
+ )
+ if module._name == 'rds_snapshot_facts':
+ module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", date='2021-12-01', collection_name='community.aws')
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ results = dict()
+ if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
+ results['snapshots'] = standalone_snapshot_info(module, conn)
+ if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
+ results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
+
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
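
The module's two result keys map onto two separate describe calls. A minimal standalone sketch (assumes configured boto3 credentials; the identifier is illustrative):

    import boto3

    rds = boto3.client('rds')
    # Unclustered snapshots, optionally narrowed to one instance
    snapshots = rds.get_paginator('describe_db_snapshots').paginate(
        DBInstanceIdentifier='helloworld-rds-master',
    ).build_full_result()['DBSnapshots']
    # Aurora/cluster snapshots, optionally narrowed by snapshot type
    cluster_snapshots = rds.get_paginator('describe_db_cluster_snapshots').paginate(
        SnapshotType='manual',
    ).build_full_result()['DBClusterSnapshots']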
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_info.py
new file mode 100644
index 00000000..63a5e47b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_snapshot_info.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# Copyright (c) 2014-2017 Ansible Project
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_snapshot_info
+version_added: 1.0.0
+short_description: obtain information about one or more RDS snapshots
+description:
+ - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
+ - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
+ - This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
+options:
+ db_snapshot_identifier:
+ description:
+ - Name of an RDS (unclustered) snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ aliases:
+ - snapshot_name
+ type: str
+ db_instance_identifier:
+ description:
+ - RDS instance name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_identifier:
+ description:
+ - RDS cluster name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_snapshot_identifier:
+ description:
+ - Name of an RDS cluster snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
+ required: false
+ type: str
+ snapshot_type:
+ description:
+ - Type of snapshot to find.
+ - By default both automated and manual snapshots will be returned.
+ required: false
+ choices: ['automated', 'manual', 'shared', 'public']
+ type: str
+requirements:
+ - "python >= 2.6"
+ - "boto3"
+author:
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get information about a snapshot
+ community.aws.rds_snapshot_info:
+ db_snapshot_identifier: snapshot_name
+ register: new_database_info
+
+- name: Get all RDS snapshots for an RDS instance
+ community.aws.rds_snapshot_info:
+ db_instance_identifier: helloworld-rds-master
+'''
+
+RETURN = '''
+snapshots:
+ description: List of non-clustered snapshots
+ returned: When cluster parameters are not passed
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 10
+ availability_zone:
+ description: The availability zone of the database from which the snapshot was taken
+ returned: always
+ type: str
+ sample: us-west-2b
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: hello-world-rds
+ db_snapshot_arn:
+ description: Snapshot ARN
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
+ db_snapshot_identifier:
+ description: Snapshot name
+ returned: always
+ type: str
+ sample: rds:hello-world-rds-us1-2018-05-16-04-03
+ encrypted:
+ description: Whether the snapshot was encrypted
+ returned: always
+ type: bool
+ sample: true
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Time the Instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ option_group_name:
+ description: Database option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 100
+ snapshot_create_time:
+ description: Time snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-16T04:03:33.871000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: automated
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: available
+ storage_type:
+ description: Storage type of underlying DB
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Snapshot tags
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: ID of VPC containing the DB
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+cluster_snapshots:
+ description: List of cluster snapshots
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 1
+ availability_zones:
+ description: The availability zones of the database from which the snapshot was taken
+ returned: always
+ type: list
+ sample:
+ - ca-central-1a
+ - ca-central-1b
+ cluster_create_time:
+ description: Date and time the cluster was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:13:40.223000+00:00'
+ db_cluster_identifier:
+ description: Database cluster identifier
+ returned: always
+ type: str
+ sample: test-aurora-cluster
+ db_cluster_snapshot_arn:
+ description: ARN of the database snapshot
+ returned: always
+ type: str
+ sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
+ db_cluster_snapshot_identifier:
+ description: Snapshot identifier
+ returned: always
+ type: str
+ sample: test-aurora-snapshot
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: aurora
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 5.6.10a
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: aurora
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: shertel
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 0
+ port:
+ description: Database port
+ returned: always
+ type: int
+ sample: 0
+ snapshot_create_time:
+ description: Date and time when the snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:23:23.731000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: manual
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: creating
+ storage_encrypted:
+ description: Whether the snapshot is encrypted
+ returned: always
+ type: bool
+ sample: true
+ tags:
+ description: Tags of the snapshot
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: VPC of the database
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def common_snapshot_info(module, conn, method, prefix, params):
+ paginator = conn.get_paginator(method)
+ try:
+ results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
+ except is_boto3_error_code('%sNotFound' % prefix):
+ results = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "trying to get snapshot information")
+
+ for snapshot in results:
+ try:
+ if snapshot['SnapshotType'] != 'shared':
+ snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
+
+ return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
+
+
+def cluster_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_cluster_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_cluster_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBClusterSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBClusterIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
+
+
+def standalone_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_instance_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
+
+
+def main():
+ argument_spec = dict(
+ db_snapshot_identifier=dict(aliases=['snapshot_name']),
+ db_instance_identifier=dict(),
+ db_cluster_identifier=dict(),
+ db_cluster_snapshot_identifier=dict(),
+ snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
+ )
+ if module._name == 'rds_snapshot_facts':
+ module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", date='2021-12-01', collection_name='community.aws')
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ results = dict()
+ if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
+ results['snapshots'] = standalone_snapshot_info(module, conn)
+ if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
+ results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
+
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_subnet_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_subnet_group.py
new file mode 100644
index 00000000..bb0cc685
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/rds_subnet_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_subnet_group
+version_added: 1.0.0
+short_description: manage RDS database subnet groups
+description:
+ - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on boto3.
+options:
+ state:
+ description:
+ - Specifies whether the subnet should be present or absent.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ name:
+ description:
+ - Database subnet group identifier.
+ required: true
+ type: str
+ description:
+ description:
+ - Database subnet group description.
+ - Required when I(state=present).
+ type: str
+ subnets:
+ description:
+ - List of subnet IDs that make up the database subnet group.
+ - Required when I(state=present).
+ type: list
+ elements: str
+author: "Scott Anderson (@tastychutney)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Add or change a subnet group
+ community.aws.rds_subnet_group:
+ state: present
+ name: norwegian-blue
+ description: My Fancy Ex Parrot Subnet Group
+ subnets:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+
+- name: Remove a subnet group
+ community.aws.rds_subnet_group:
+ state: absent
+ name: norwegian-blue
+'''
+
+RETURN = r'''
+subnet_group:
+ description: Dictionary of DB subnet group values
+ returned: I(state=present)
+ type: complex
+ contains:
+ name:
+ description: The name of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ db_subnet_group_name:
+ description: The name of the DB subnet group
+ returned: I(state=present)
+ type: str
+ description:
+ description: The description of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ db_subnet_group_description:
+ description: The description of the DB subnet group
+ returned: I(state=present)
+ type: str
+ vpc_id:
+ description: The VpcId of the DB subnet group
+ returned: I(state=present)
+ type: str
+ subnet_ids:
+ description: Contains a list of Subnet IDs
+ returned: I(state=present)
+ type: list
+ subnets:
+ description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa
+ returned: I(state=present)
+ type: list
+ status:
+ description: The status of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ subnet_group_status:
+ description: The status of the DB subnet group
+ returned: I(state=present)
+ type: str
+ db_subnet_group_arn:
+ description: The ARN of the DB subnet group
+ returned: I(state=present)
+ type: str
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_result(changed, subnet_group=None):
+ if subnet_group is None:
+ return dict(
+ changed=changed
+ )
+ result_subnet_group = dict(camel_dict_to_snake_dict(subnet_group))
+ result_subnet_group['name'] = result_subnet_group.get(
+ 'db_subnet_group_name')
+ result_subnet_group['description'] = result_subnet_group.get(
+ 'db_subnet_group_description')
+ result_subnet_group['status'] = result_subnet_group.get(
+ 'subnet_group_status')
+ result_subnet_group['subnet_ids'] = create_subnet_list(
+ subnet_group.get('Subnets'))
+ return dict(
+ changed=changed,
+ subnet_group=result_subnet_group
+ )
+
+
+def create_subnet_list(subnets):
+ '''
+ Construct a list of subnet ids from a list of subnet dicts returned by boto3.
+ Parameters:
+ subnets (list): A list of subnets definitions.
+ @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups
+ Returns:
+ (list): List of subnet ids (str)
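+ Example (illustrative IDs):
+ >>> create_subnet_list([{'SubnetIdentifier': 'subnet-aaaa'}])
+ ['subnet-aaaa']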
+ '''
+ subnets_ids = []
+ for subnet in subnets:
+ subnets_ids.append(subnet.get('SubnetIdentifier'))
+ return subnets_ids
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list', elements='str'),
+ )
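+ # 'description' and 'subnets' are only mandatory when creating the group.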
+ required_if = [('state', 'present', ['description', 'subnets'])]
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec, required_if=required_if)
+ state = module.params.get('state')
+ group_name = module.params.get('name').lower()
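+ # Lowercase the name so later comparisons with the API response stay consistent.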
+ group_description = module.params.get('description')
+ group_subnets = module.params.get('subnets') or []
+
+ try:
+ conn = module.client('rds')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to instantiate AWS connection')
+ # Default.
+ result = create_result(False)
+
+ try:
+ matching_groups = conn.describe_db_subnet_groups(
+ DBSubnetGroupName=group_name, MaxRecords=100).get('DBSubnetGroups')
+ except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+ # No existing subnet group; create it if needed, otherwise just exit.
+ if state == 'present':
+ try:
+ new_group = conn.create_db_subnet_group(
+ DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets)
+ result = create_result(True, new_group.get('DBSubnetGroup'))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to create a new subnet group')
+ module.exit_json(**result)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, 'Failed to get subnet groups description')
+ # We have one or more subnets at this point.
+ if state == 'absent':
+ try:
+ conn.delete_db_subnet_group(DBSubnetGroupName=group_name)
+ result = create_result(True)
+ module.exit_json(**result)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to delete a subnet group')
+
+ # Sort the subnet IDs before comparing them
+ existing_subnets = create_subnet_list(matching_groups[0].get('Subnets'))
+ existing_subnets.sort()
+ group_subnets.sort()
+ # See if anything changed.
+ if (matching_groups[0].get('DBSubnetGroupName') == group_name and
+ matching_groups[0].get('DBSubnetGroupDescription') == group_description and
+ existing_subnets == group_subnets):
+ result = create_result(False, matching_groups[0])
+ module.exit_json(**result)
+ # Modify existing group.
+ try:
+ changed_group = conn.modify_db_subnet_group(
+ DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets)
+ result = create_result(True, changed_group.get('DBSubnetGroup'))
+ module.exit_json(**result)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to update a subnet group')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift.py
new file mode 100644
index 00000000..7c992685
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift.py
@@ -0,0 +1,685 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+ - "Rafael Driutti (@rafaeldriutti)"
+module: redshift
+version_added: 1.0.0
+short_description: create, delete, or modify an Amazon Redshift cluster
+description:
+ - Creates, deletes, or modifies Amazon Redshift cluster instances.
+options:
+ command:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'create', 'facts', 'delete', 'modify' ]
+ type: str
+ identifier:
+ description:
+ - Redshift cluster identifier.
+ required: true
+ type: str
+ node_type:
+ description:
+ - The node type of the cluster.
+ - Required when I(command=create).
+ choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large','dc2.large',
+ 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
+ type: str
+ username:
+ description:
+ - Master database username.
+ - Used only when I(command=create).
+ type: str
+ password:
+ description:
+ - Master database password.
+ - Used only when I(command=create).
+ type: str
+ cluster_type:
+ description:
+ - The type of cluster.
+ choices: ['multi-node', 'single-node' ]
+ default: 'single-node'
+ type: str
+ db_name:
+ description:
+ - Name of the database.
+ type: str
+ availability_zone:
+ description:
+ - Availability zone in which to launch cluster.
+ aliases: ['zone', 'aws_zone']
+ type: str
+ number_of_nodes:
+ description:
+ - Number of nodes.
+ - Only used when I(cluster_type=multi-node).
+ type: int
+ cluster_subnet_group_name:
+ description:
+ - The name of the subnet group in which to place the cluster.
+ aliases: ['subnet']
+ type: str
+ cluster_security_groups:
+ description:
+ - The cluster security groups the cluster should belong to.
+ type: list
+ elements: str
+ aliases: ['security_groups']
+ vpc_security_group_ids:
+ description:
+ - The VPC security group IDs to associate with the cluster.
+ aliases: ['vpc_security_groups']
+ type: list
+ elements: str
+ skip_final_cluster_snapshot:
+ description:
+ - Skip a final snapshot before deleting the cluster.
+ - Used only when I(command=delete).
+ aliases: ['skip_final_snapshot']
+ default: false
+ type: bool
+ final_cluster_snapshot_identifier:
+ description:
+ - Identifier of the final snapshot to be created before deleting the cluster.
+ - If this parameter is provided, I(skip_final_cluster_snapshot) must be C(false).
+ - Used only when I(command=delete).
+ aliases: ['final_snapshot_id']
+ type: str
+ preferred_maintenance_window:
+ description:
+ - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
+ - Times are specified in UTC.
+ - If not specified then a random 30 minute maintenance window is assigned.
+ aliases: ['maintance_window', 'maint_window']
+ type: str
+ cluster_parameter_group_name:
+ description:
+ - Name of the cluster parameter group.
+ aliases: ['param_group_name']
+ type: str
+ automated_snapshot_retention_period:
+ description:
+ - The number of days that automated snapshots are retained.
+ aliases: ['retention_period']
+ type: int
+ port:
+ description:
+ - The port the cluster listens on.
+ type: int
+ cluster_version:
+ description:
+ - The Redshift engine version the cluster should run.
+ aliases: ['version']
+ choices: ['1.0']
+ type: str
+ allow_version_upgrade:
+ description:
+ - When I(allow_version_upgrade=true) the cluster may be automatically
+ upgraded during the maintenance window.
+ aliases: ['version_upgrade']
+ default: true
+ type: bool
+ publicly_accessible:
+ description:
+ - Whether the cluster is publicly accessible.
+ default: false
+ type: bool
+ encrypted:
+ description:
+ - Whether the cluster is encrypted.
+ default: false
+ type: bool
+ elastic_ip:
+ description:
+ - An Elastic IP to use for the cluster.
+ type: str
+ new_cluster_identifier:
+ description:
+ - The new cluster identifier.
+ - Only used when I(command=modify).
+ aliases: ['new_identifier']
+ type: str
+ wait:
+ description:
+ - When I(command=create) or I(command=modify), wait for the cluster to enter the 'available' state.
+ - When I(command=delete), wait for the cluster to be terminated.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - When I(wait=true) defines how long in seconds before giving up.
+ default: 300
+ type: int
+ enhanced_vpc_routing:
+ description:
+ - Whether the cluster should have enhanced VPC routing enabled.
+ default: false
+ type: bool
+ tags:
+ description:
+ - A dictionary of resource tags.
+ type: dict
+ aliases: ['resource_tags']
+ version_added: "1.3.0"
+ purge_tags:
+ description:
+ - Purge existing tags on the cluster that are not found in I(tags).
+ type: bool
+ default: true
+ version_added: "1.3.0"
+requirements: [ 'boto3' ]
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+'''
+
+EXAMPLES = r'''
+- name: Basic cluster provisioning example
+ community.aws.redshift:
+ command: create
+ node_type: ds1.xlarge
+ identifier: new_cluster
+ username: cluster_admin
+ password: 1nsecure
+
+- name: Cluster delete example
+ community.aws.redshift:
+ command: delete
+ identifier: new_cluster
+ skip_final_cluster_snapshot: true
+ wait: true
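+
+# A minimal modify sketch; the identifier and node count are illustrative.
+- name: Resize an existing cluster
+ community.aws.redshift:
+ command: modify
+ identifier: new_cluster
+ cluster_type: multi-node
+ number_of_nodes: 3
+ wait: true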
+'''
+
+RETURN = r'''
+cluster:
+ description: dictionary containing all the cluster information
+ returned: success
+ type: complex
+ contains:
+ identifier:
+ description: Id of the cluster.
+ returned: success
+ type: str
+ sample: "new_redshift_cluster"
+ create_time:
+ description: Time of the cluster creation as timestamp.
+ returned: success
+ type: float
+ sample: 1430158536.308
+ status:
+ description: Status of the cluster.
+ returned: success
+ type: str
+ sample: "available"
+ db_name:
+ description: Name of the database.
+ returned: success
+ type: str
+ sample: "new_db_name"
+ availability_zone:
+ description: Amazon availability zone where the cluster is located. "None" until cluster is available.
+ returned: success
+ type: str
+ sample: "us-east-1b"
+ maintenance_window:
+ description: Time frame when maintenance/upgrade are done.
+ returned: success
+ type: str
+ sample: "sun:09:30-sun:10:00"
+ private_ip_address:
+ description: Private IP address of the main node.
+ returned: success
+ type: str
+ sample: "10.10.10.10"
+ public_ip_address:
+ description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled.
+ returned: success
+ type: str
+ sample: "0.0.0.0"
+ port:
+ description: Port of the cluster. "None" until cluster is available.
+ returned: success
+ type: int
+ sample: 5439
+ url:
+ description: FQDN of the main cluster node. "None" until cluster is available.
+ returned: success
+ type: str
+ sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
+ enhanced_vpc_routing:
+ description: Status of the enhanced VPC routing feature.
+ returned: success
+ type: bool
+ tags:
+ description: AWS tags for the cluster.
+ returned: success
+ type: dict
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id
+
+
+def _ensure_tags(redshift, identifier, existing_tags, module):
+ """Compares and update resource tags"""
+
+ account_id = get_aws_account_id(module)
+ region = module.params.get('region')
+ resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}".format(region, account_id, identifier)
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
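+ # compare_aws_tags() returns the tags to add or update and the tag keys to
+ # remove; purge_tags controls whether unrequested tags are removed at all.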
+ tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags)
+
+ if tags_to_add:
+ try:
+ redshift.create_tags(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to add tags to cluster")
+
+ if tags_to_remove:
+ try:
+ redshift.delete_tags(ResourceName=resource_arn, TagKeys=tags_to_remove)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete tags on cluster")
+
+ changed = bool(tags_to_add or tags_to_remove)
+ return changed
+
+
+def _collect_facts(resource):
+ """Transform cluster information to dict."""
+ facts = {
+ 'identifier': resource['ClusterIdentifier'],
+ 'status': resource['ClusterStatus'],
+ 'username': resource['MasterUsername'],
+ 'db_name': resource['DBName'],
+ 'maintenance_window': resource['PreferredMaintenanceWindow'],
+ 'enhanced_vpc_routing': resource['EnhancedVpcRouting']
+
+ }
+
+ for node in resource['ClusterNodes']:
+ if node['NodeRole'] in ('SHARED', 'LEADER'):
+ facts['private_ip_address'] = node['PrivateIPAddress']
+ if facts['enhanced_vpc_routing'] is False:
+ facts['public_ip_address'] = node['PublicIPAddress']
+ else:
+ facts['public_ip_address'] = None
+ break
+
+ # Some attributes are only populated once the cluster has reached the
+ # 'available' status.
+ facts['create_time'] = None
+ facts['url'] = None
+ facts['port'] = None
+ facts['availability_zone'] = None
+ facts['tags'] = {}
+
+ if resource['ClusterStatus'] != "creating":
+ facts['create_time'] = resource['ClusterCreateTime']
+ facts['url'] = resource['Endpoint']['Address']
+ facts['port'] = resource['Endpoint']['Port']
+ facts['availability_zone'] = resource['AvailabilityZone']
+ facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags'])
+
+ return facts
+
+
+@AWSRetry.jittered_backoff()
+def _describe_cluster(redshift, identifier):
+ '''
+ Basic wrapper around describe_clusters with a retry applied
+ '''
+ return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+
+
+@AWSRetry.jittered_backoff()
+def _create_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around create_cluster with a retry applied
+ '''
+ return redshift.create_cluster(**kwargs)
+
+
+# Simple wrapper around delete, try to avoid throwing an error if some other
+# operation is in progress
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+def _delete_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around delete_cluster with a retry applied.
+ Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that
+ we can still delete a cluster if some kind of change operation was in
+ progress.
+ '''
+ return redshift.delete_cluster(**kwargs)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+def _modify_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around modify_cluster with a retry applied.
+ Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases
+ where another modification is still in progress
+ '''
+ return redshift.modify_cluster(**kwargs)
+
+
+def create_cluster(module, redshift):
+ """
+ Create a new cluster
+
+ module: AnsibleAWSModule object
+ redshift: authenticated redshift connection object
+
+ Returns: tuple of (changed, cluster facts dict)
+ """
+
+ identifier = module.params.get('identifier')
+ node_type = module.params.get('node_type')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ d_b_name = module.params.get('db_name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ tags = module.params.get('tags')
+
+ changed = True
+ # Package up the optional parameters
+ params = {}
+ for p in ('cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port',
+ 'cluster_version', 'allow_version_upgrade',
+ 'number_of_nodes', 'publicly_accessible', 'encrypted',
+ 'elastic_ip', 'enhanced_vpc_routing'):
+ # https://github.com/boto/boto3/issues/400
+ if module.params.get(p) is not None:
+ params[p] = module.params.get(p)
+
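+ # 'd_b_name' is deliberately spelt this way: snake_dict_to_camel_dict()
+ # below turns it into boto3's 'DBName' parameter.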
+ if d_b_name:
+ params['d_b_name'] = d_b_name
+ if tags:
+ tags = ansible_dict_to_boto3_tag_list(tags)
+ params['tags'] = tags
+
+ try:
+ _describe_cluster(redshift, identifier)
+ changed = False
+ except is_boto3_error_code('ClusterNotFound'):
+ try:
+ _create_cluster(redshift,
+ ClusterIdentifier=identifier,
+ NodeType=node_type,
+ MasterUsername=username,
+ MasterUserPassword=password,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create cluster")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe cluster")
+ if wait:
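+ # The boto3 'cluster_available' waiter polls every 60 seconds by default,
+ # so wait_timeout is converted into a number of attempts.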
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_available')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe cluster")
+
+ if tags:
+ if _ensure_tags(redshift, identifier, resource['Tags'], module):
+ changed = True
+ resource = _describe_cluster(redshift, identifier)
+
+ return changed, _collect_facts(resource)
+
+
+def describe_cluster(module, redshift):
+ """
+ Collect data about the cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+ identifier = module.params.get('identifier')
+
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error describing cluster")
+
+ return True, _collect_facts(resource)
+
+
+def delete_cluster(module, redshift):
+ """
+ Delete a cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ params = {}
+ for p in ('skip_final_cluster_snapshot',
+ 'final_cluster_snapshot_identifier'):
+ if p in module.params:
+ # https://github.com/boto/boto3/issues/400
+ if module.params.get(p) is not None:
+ params[p] = module.params.get(p)
+
+ try:
+ _delete_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except is_boto3_error_code('ClusterNotFound'):
+ return False, {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete cluster")
+
+ if wait:
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_deleted')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout deleting the cluster")
+
+ return True, {}
+
+
+def modify_cluster(module, redshift):
+ """
+ Modify an existing cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ region = module.params.get('region')
+
+ # Package up the optional parameters
+ params = {}
+ for p in ('cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port', 'cluster_version',
+ 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
+ # https://github.com/boto/boto3/issues/400
+ if module.params.get(p) is not None:
+ params[p] = module.params.get(p)
+
+ # enhanced_vpc_routing parameter change needs an exclusive request
+ if module.params.get('enhanced_vpc_routing') is not None:
+ try:
+ _modify_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+ if wait:
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_available')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e,
+ msg="Timeout waiting for cluster enhanced vpc routing modification")
+
+ # change the rest
+ try:
+ _modify_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+
+ if module.params.get('new_cluster_identifier'):
+ identifier = module.params.get('new_cluster_identifier')
+
+ if wait:
+ attempts = wait_timeout // 60
+ waiter2 = redshift.get_waiter('cluster_available')
+ try:
+ waiter2.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe redshift cluster %s" % identifier)
+
+ if _ensure_tags(redshift, identifier, resource['Tags'], module):
+ resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+
+ return True, _collect_facts(resource)
+
+
+def main():
+ argument_spec = dict(
+ command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+ identifier=dict(required=True),
+ node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
+ 'ds2.8xlarge', 'dc1.large', 'dc2.large',
+ 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
+ 'dw2.large', 'dw2.8xlarge'], required=False),
+ username=dict(required=False),
+ password=dict(no_log=True, required=False),
+ db_name=dict(required=False),
+ cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
+ cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'),
+ vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'),
+ skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
+ type='bool', default=False),
+ final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
+ cluster_subnet_group_name=dict(aliases=['subnet']),
+ availability_zone=dict(aliases=['aws_zone', 'zone']),
+ preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
+ cluster_parameter_group_name=dict(aliases=['param_group_name']),
+ automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
+ port=dict(type='int'),
+ cluster_version=dict(aliases=['version'], choices=['1.0']),
+ allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
+ number_of_nodes=dict(type='int'),
+ publicly_accessible=dict(type='bool', default=False),
+ encrypted=dict(type='bool', default=False),
+ elastic_ip=dict(required=False),
+ new_cluster_identifier=dict(aliases=['new_identifier']),
+ enhanced_vpc_routing=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True)
+ )
+
+ required_if = [
+ ('command', 'delete', ['skip_final_cluster_snapshot']),
+ ('command', 'create', ['node_type',
+ 'username',
+ 'password'])
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if
+ )
+
+ command = module.params.get('command')
+ skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
+ final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
+ # can't use module basic required_if check for this case
+ if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
+ module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
+
+ conn = module.client('redshift')
+
+ changed = True
+ if command == 'create':
+ (changed, cluster) = create_cluster(module, conn)
+
+ elif command == 'facts':
+ (changed, cluster) = describe_cluster(module, conn)
+
+ elif command == 'delete':
+ (changed, cluster) = delete_cluster(module, conn)
+
+ elif command == 'modify':
+ (changed, cluster) = modify_cluster(module, conn)
+
+ module.exit_json(changed=changed, cluster=cluster)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
new file mode 100644
index 00000000..fbcf5543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, JR Kerkstra <jrkerkstra@example.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redshift_cross_region_snapshots
+version_added: 1.0.0
+short_description: Manage Redshift Cross Region Snapshots
+description:
+ - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots.
+ - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy)
+author: JR Kerkstra (@captainkerk)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to configure cross-region snapshots for.
+ required: true
+ aliases: [ "cluster" ]
+ type: str
+ state:
+ description:
+ - Create or remove the cross-region snapshot configuration.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ region:
+ description:
+ - "The cluster's region."
+ required: true
+ aliases: [ "source" ]
+ type: str
+ destination_region:
+ description:
+ - The region to copy snapshots to.
+ required: true
+ aliases: [ "destination" ]
+ type: str
+ snapshot_copy_grant:
+ description:
+ - A grant for Amazon Redshift to use a master key in the I(destination_region).
+ - See U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.create_snapshot_copy_grant)
+ aliases: [ "copy_grant" ]
+ type: str
+ snapshot_retention_period:
+ description:
+ - The number of days to keep cross-region snapshots for.
+ required: true
+ aliases: [ "retention_period" ]
+ type: int
+requirements: [ "botocore", "boto3" ]
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+EXAMPLES = '''
+- name: configure cross-region snapshot on cluster `johniscool`
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: johniscool
+ state: present
+ region: us-east-1
+ destination_region: us-west-2
+ retention_period: 1
+
+- name: configure cross-region snapshot on kms-encrypted cluster
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: whatever
+ state: present
+ region: us-east-1
+ destination: us-west-2
+ copy_grant: 'my-grant-in-destination'
+ retention_period: 10
+
+- name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize)
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: whatever
+ state: absent
+ region: us-east-1
+ destination_region: us-west-2
+'''
+
+RETURN = ''' # '''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class SnapshotController(object):
+
+ def __init__(self, client, cluster_name):
+ self.client = client
+ self.cluster_name = cluster_name
+
+ def get_cluster_snapshot_copy_status(self):
+ response = self.client.describe_clusters(
+ ClusterIdentifier=self.cluster_name
+ )
+ return response['Clusters'][0].get('ClusterSnapshotCopyStatus')
+
+ def enable_snapshot_copy(self, destination_region, grant_name, retention_period):
+ if grant_name:
+ self.client.enable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name,
+ DestinationRegion=destination_region,
+ RetentionPeriod=retention_period,
+ SnapshotCopyGrantName=grant_name,
+ )
+ else:
+ self.client.enable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name,
+ DestinationRegion=destination_region,
+ RetentionPeriod=retention_period,
+ )
+
+ def disable_snapshot_copy(self):
+ self.client.disable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name
+ )
+
+ def modify_snapshot_copy_retention_period(self, retention_period):
+ self.client.modify_snapshot_copy_retention_period(
+ ClusterIdentifier=self.cluster_name,
+ RetentionPeriod=retention_period
+ )
+
+
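+# AWS cannot change the destination region or copy grant of an existing
+# snapshot-copy configuration in place; it has to be disabled and re-enabled.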
+def requesting_unsupported_modifications(actual, requested):
+ if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or
+ actual['DestinationRegion'] != requested['destination_region']):
+ return True
+ return False
+
+
+def needs_update(actual, requested):
+ if actual['RetentionPeriod'] != requested['snapshot_retention_period']:
+ return True
+ return False
+
+
+def run_module():
+ argument_spec = dict(
+ cluster_name=dict(type='str', required=True, aliases=['cluster']),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ region=dict(type='str', required=True, aliases=['source']),
+ destination_region=dict(type='str', required=True, aliases=['destination']),
+ snapshot_copy_grant=dict(type='str', aliases=['copy_grant']),
+ snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+ connection = module.client('redshift')
+
+ snapshot_controller = SnapshotController(client=connection,
+ cluster_name=module.params.get('cluster_name'))
+
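+ # describe_clusters only includes ClusterSnapshotCopyStatus once cross-region
+ # copy has been enabled, so None here means "currently disabled".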
+ current_config = snapshot_controller.get_cluster_snapshot_copy_status()
+ if current_config is not None:
+ if module.params.get('state') == 'present':
+ if requesting_unsupported_modifications(current_config, module.params):
+ message = 'Cannot modify destination_region or grant_name. ' \
+ 'Please disable cross-region snapshots, and re-run.'
+ module.fail_json(msg=message, **result)
+ if needs_update(current_config, module.params):
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.modify_snapshot_copy_retention_period(
+ module.params.get('snapshot_retention_period')
+ )
+ else:
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.disable_snapshot_copy()
+ else:
+ if module.params.get('state') == 'present':
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
+ module.params.get('snapshot_copy_grant'),
+ module.params.get('snapshot_retention_period'))
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_facts.py
new file mode 100644
index 00000000..679f53c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_facts.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redshift_info
+version_added: 1.0.0
+author: "Jens Carl (@j-carl)"
+short_description: Gather information about Redshift cluster(s)
+description:
+ - Gather information about Redshift cluster(s).
+ - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ cluster_identifier:
+ description:
+ - The prefix of the cluster identifier of the Redshift cluster(s) you are searching for.
+ - "This is a regular expression match with implicit '^'. Append '$' for a complete match."
+ required: false
+ aliases: ['name', 'identifier']
+ type: str
+ tags:
+ description:
+ - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
+ to match against the security group(s) you are searching for."
+ required: false
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS guide for details.
+
+- name: Find all clusters
+ community.aws.redshift_info:
+ register: redshift
+
+- name: Find cluster(s) with matching tags
+ community.aws.redshift_info:
+ tags:
+ env: prd
+ stack: monitoring
+ register: redshift_tags
+
+- name: Find cluster(s) with matching name/prefix and tags
+ community.aws.redshift_info:
+ tags:
+ env: dev
+ stack: web
+ name: user-
+ register: redshift_web
+
+- name: Fail if no cluster(s) is/are found
+ community.aws.redshift_info:
+ tags:
+ env: stg
+ stack: db
+ register: redshift_user
+ failed_when: "{{ redshift_user.results | length == 0 }}"
+'''
+
+RETURN = '''
+# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
+---
+cluster_identifier:
+ description: Unique key to identify the cluster.
+ returned: success
+ type: str
+ sample: "redshift-identifier"
+node_type:
+ description: The node type for nodes in the cluster.
+ returned: success
+ type: str
+ sample: "ds2.xlarge"
+cluster_status:
+ description: Current state of the cluster.
+ returned: success
+ type: str
+ sample: "available"
+modify_status:
+ description: The status of a modify operation.
+ returned: optional
+ type: str
+ sample: ""
+master_username:
+ description: The master user name for the cluster.
+ returned: success
+ type: str
+ sample: "admin"
+db_name:
+ description: The name of the initial database that was created when the cluster was created.
+ returned: success
+ type: str
+ sample: "dev"
+endpoint:
+ description: The connection endpoint.
+ returned: success
+ type: dict
+ sample: {
+ "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
+ "port": 5439
+ }
+cluster_create_time:
+ description: The date and time that the cluster was created.
+ returned: success
+ type: str
+ sample: "2016-05-10T08:33:16.629000+00:00"
+automated_snapshot_retention_period:
+ description: The number of days that automatic cluster snapshots are retained.
+ returned: success
+ type: int
+ sample: 1
+cluster_security_groups:
+ description: A list of cluster security groups that are associated with the cluster.
+ returned: success
+ type: list
+ sample: []
+vpc_security_groups:
+ description: A list of VPC security groups that are associated with the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "status": "active",
+ "vpc_security_group_id": "sg-12cghhg"
+ }
+ ]
+cluster_parameter_groups:
+ description: The list of cluster parameter groups that are associated with this cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "cluster_parameter_status_list": [
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "statement_timeout"
+ },
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "require_ssl"
+ }
+ ],
+ "parameter_apply_status": "in-sync",
+ "parameter_group_name": "tuba"
+ }
+ ]
+cluster_subnet_group_name:
+ description: The name of the subnet group that is associated with the cluster.
+ returned: success
+ type: str
+ sample: "redshift-subnet"
+vpc_id:
+ description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
+ returned: success
+ type: str
+ sample: "vpc-1234567"
+availability_zone:
+ description: The name of the Availability Zone in which the cluster is located.
+ returned: success
+ type: str
+ sample: "us-east-1b"
+preferred_maintenance_window:
+ description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
+ returned: success
+ type: str
+ sample: "tue:07:30-tue:08:00"
+pending_modified_values:
+ description: A value that, if present, indicates that changes to the cluster are pending.
+ returned: success
+ type: dict
+ sample: {}
+cluster_version:
+ description: The version ID of the Amazon Redshift engine that is running on the cluster.
+ returned: success
+ type: str
+ sample: "1.0"
+allow_version_upgrade:
+ description: >
+ A Boolean value that, if true, indicates that major version upgrades will be applied
+ automatically to the cluster during the maintenance window.
+ returned: success
+ type: bool
+ sample: true|false
+number_of_nodes:
+ description: The number of compute nodes in the cluster.
+ returned: success
+ type: int
+ sample: 12
+publicly_accessible:
+ description: A Boolean value that, if true , indicates that the cluster can be accessed from a public network.
+ returned: success
+ type: bool
+ sample: true|false
+encrypted:
+ description: Boolean value that, if true , indicates that data in the cluster is encrypted at rest.
+ returned: success
+ type: bool
+ sample: true|false
+restore_status:
+ description: A value that describes the status of a cluster restore action.
+ returned: success
+ type: dict
+ sample: {}
+hsm_status:
+ description: >
+ A value that reports whether the Amazon Redshift cluster has finished applying any hardware
+ security module (HSM) settings changes specified in a modify cluster command.
+ returned: success
+ type: dict
+ sample: {}
+cluster_snapshot_copy_status:
+ description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
+ returned: success
+ type: dict
+ sample: {}
+cluster_public_keys:
+ description: The public key for the cluster.
+ returned: success
+ type: str
+ sample: "ssh-rsa anjigfam Amazon-Redshift\n"
+cluster_nodes:
+ description: The nodes in the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "node_role": "LEADER",
+ "private_ip_address": "10.0.0.1",
+ "public_ip_address": "x.x.x.x"
+ },
+ {
+ "node_role": "COMPUTE-1",
+ "private_ip_address": "10.0.0.3",
+ "public_ip_address": "x.x.x.x"
+ }
+ ]
+elastic_ip_status:
+ description: The status of the elastic IP (EIP) address.
+ returned: success
+ type: dict
+ sample: {}
+cluster_revision_number:
+ description: The specific revision number of the database in the cluster.
+ returned: success
+ type: str
+ sample: "1231"
+tags:
+ description: The list of tags for the cluster.
+ returned: success
+ type: list
+ sample: []
+kms_key_id:
+ description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
+ returned: success
+ type: str
+ sample: ""
+enhanced_vpc_routing:
+ description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
+ returned: success
+ type: bool
+ sample: true|false
+iam_roles:
+ description: List of IAM roles attached to the cluster.
+ returned: success
+ type: list
+ sample: []
+'''
+
+import re
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_tags(tags_to_match, cluster):
+ # Every requested tag must be present on the cluster with the same value.
+ cluster_tags = dict((tag['Key'], tag['Value']) for tag in cluster['Tags'])
+ return all(cluster_tags.get(key) == value for key, value in tags_to_match.items())
+
+
+def find_clusters(conn, module, identifier=None, tags=None):
+
+ try:
+ cluster_paginator = conn.get_paginator('describe_clusters')
+ clusters = cluster_paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch clusters.')
+
+ matched_clusters = []
+
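+ # 'identifier' is treated as a regular expression anchored at the start, so
+ # a plain string acts as a prefix match (append '$' for an exact match).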
+ if identifier is not None:
+ identifier_prog = re.compile('^' + identifier)
+
+ for cluster in clusters['Clusters']:
+
+ matched_identifier = True
+ if identifier:
+ matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
+
+ matched_tags = True
+ if tags:
+ matched_tags = match_tags(tags, cluster)
+
+ if matched_identifier and matched_tags:
+ matched_clusters.append(camel_dict_to_snake_dict(cluster))
+
+ return matched_clusters
+
+
+def main():
+
+ argument_spec = dict(
+ cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
+ tags=dict(type='dict')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'redshift_facts':
+ module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", date='2021-12-01', collection_name='community.aws')
+
+ cluster_identifier = module.params.get('cluster_identifier')
+ cluster_tags = module.params.get('tags')
+
+ redshift = module.client('redshift')
+
+ results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_info.py
new file mode 100644
index 00000000..679f53c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_info.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redshift_info
+version_added: 1.0.0
+author: "Jens Carl (@j-carl)"
+short_description: Gather information about Redshift cluster(s)
+description:
+ - Gather information about Redshift cluster(s).
+ - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+ cluster_identifier:
+ description:
+ - The prefix of the cluster identifier of the Redshift cluster(s) you are searching for.
+ - "This is a regular expression match with implicit '^'. Append '$' for a complete match."
+ required: false
+ aliases: ['name', 'identifier']
+ type: str
+ tags:
+ description:
+ - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
+ to match against the security group(s) you are searching for."
+ required: false
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS guide for details.
+
+- name: Find all clusters
+ community.aws.redshift_info:
+ register: redshift
+
+- name: Find cluster(s) with matching tags
+ community.aws.redshift_info:
+ tags:
+ env: prd
+ stack: monitoring
+ register: redshift_tags
+
+- name: Find cluster(s) with matching name/prefix and tags
+ community.aws.redshift_info:
+ tags:
+ env: dev
+ stack: web
+ name: user-
+ register: redshift_web
+
+- name: Fail if no cluster(s) is/are found
+ community.aws.redshift_info:
+ tags:
+ env: stg
+ stack: db
+ register: redshift_user
+ failed_when: "{{ redshift_user.results | length == 0 }}"
+'''
+
+RETURN = '''
+# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
+---
+cluster_identifier:
+ description: Unique key to identify the cluster.
+ returned: success
+ type: str
+ sample: "redshift-identifier"
+node_type:
+ description: The node type for nodes in the cluster.
+ returned: success
+ type: str
+ sample: "ds2.xlarge"
+cluster_status:
+ description: Current state of the cluster.
+ returned: success
+ type: str
+ sample: "available"
+modify_status:
+ description: The status of a modify operation.
+ returned: optional
+ type: str
+ sample: ""
+master_username:
+ description: The master user name for the cluster.
+ returned: success
+ type: str
+ sample: "admin"
+db_name:
+ description: The name of the initial database that was created when the cluster was created.
+ returned: success
+ type: str
+ sample: "dev"
+endpoint:
+ description: The connection endpoint.
+ returned: success
+ type: dict
+ sample: {
+ "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
+ "port": 5439
+ }
+cluster_create_time:
+ description: The date and time that the cluster was created.
+ returned: success
+ type: str
+ sample: "2016-05-10T08:33:16.629000+00:00"
+automated_snapshot_retention_period:
+ description: The number of days that automatic cluster snapshots are retained.
+ returned: success
+ type: int
+ sample: 1
+cluster_security_groups:
+ description: A list of cluster security groups that are associated with the cluster.
+ returned: success
+ type: list
+ sample: []
+vpc_security_groups:
+ description: A list of VPC security groups the are associated with the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "status": "active",
+ "vpc_security_group_id": "sg-12cghhg"
+ }
+ ]
+cluster_parameter_groups:
+ description: The list of cluster parameter groups that are associated with this cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "cluster_parameter_status_list": [
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "statement_timeout"
+ },
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "require_ssl"
+ }
+ ],
+ "parameter_apply_status": "in-sync",
+ "parameter_group_name": "tuba"
+ }
+ ]
+cluster_subnet_group_name:
+ description: The name of the subnet group that is associated with the cluster.
+ returned: success
+ type: str
+ sample: "redshift-subnet"
+vpc_id:
+ description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
+ returned: success
+ type: str
+ sample: "vpc-1234567"
+availability_zone:
+ description: The name of the Availability Zone in which the cluster is located.
+ returned: success
+ type: str
+ sample: "us-east-1b"
+preferred_maintenance_window:
+ description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
+ returned: success
+ type: str
+ sample: "tue:07:30-tue:08:00"
+pending_modified_values:
+ description: A value that, if present, indicates that changes to the cluster are pending.
+ returned: success
+ type: dict
+ sample: {}
+cluster_version:
+ description: The version ID of the Amazon Redshift engine that is running on the cluster.
+ returned: success
+ type: str
+ sample: "1.0"
+allow_version_upgrade:
+ description: >
+ A Boolean value that, if true, indicates that major version upgrades will be applied
+ automatically to the cluster during the maintenance window.
+ returned: success
+ type: bool
+ sample: true|false
+number_of_nodes:
+ description: The number of compute nodes in the cluster.
+ returned: success
+ type: int
+ sample: 12
+publicly_accessible:
+ description: A Boolean value that, if true , indicates that the cluster can be accessed from a public network.
+ returned: success
+ type: bool
+ sample: true|false
+encrypted:
+ description: Boolean value that, if true , indicates that data in the cluster is encrypted at rest.
+ returned: success
+ type: bool
+ sample: true|false
+restore_status:
+ description: A value that describes the status of a cluster restore action.
+ returned: success
+ type: dict
+ sample: {}
+hsm_status:
+ description: >
+ A value that reports whether the Amazon Redshift cluster has finished applying any hardware
+ security module (HSM) settings changes specified in a modify cluster command.
+ returned: success
+ type: dict
+ sample: {}
+cluster_snapshot_copy_status:
+ description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
+ returned: success
+ type: dict
+ sample: {}
+cluster_public_keys:
+ description: The public key for the cluster.
+ returned: success
+ type: str
+ sample: "ssh-rsa anjigfam Amazon-Redshift\n"
+cluster_nodes:
+ description: The nodes in the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "node_role": "LEADER",
+ "private_ip_address": "10.0.0.1",
+ "public_ip_address": "x.x.x.x"
+ },
+ {
+ "node_role": "COMPUTE-1",
+ "private_ip_address": "10.0.0.3",
+ "public_ip_address": "x.x.x.x"
+ }
+ ]
+elastic_ip_status:
+ description: The status of the elastic IP (EIP) address.
+ returned: success
+ type: dict
+ sample: {}
+cluster_revision_number:
+ description: The specific revision number of the database in the cluster.
+ returned: success
+ type: str
+ sample: "1231"
+tags:
+ description: The list of tags for the cluster.
+ returned: success
+ type: list
+ sample: []
+kms_key_id:
+ description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
+ returned: success
+ type: str
+ sample: ""
+enhanced_vpc_routing:
+ description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
+ returned: success
+ type: bool
+ sample: true|false
+iam_roles:
+ description: List of IAM roles attached to the cluster.
+ returned: success
+ type: list
+ sample: []
+'''
+
+import re
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_tags(tags_to_match, cluster):
+ # Every requested tag must be present on the cluster with the same value.
+ cluster_tags = dict((tag['Key'], tag['Value']) for tag in cluster['Tags'])
+ return all(cluster_tags.get(key) == value for key, value in tags_to_match.items())
+
+
+def find_clusters(conn, module, identifier=None, tags=None):
+
+ try:
+ cluster_paginator = conn.get_paginator('describe_clusters')
+ clusters = cluster_paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch clusters.')
+
+ matched_clusters = []
+
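+ # 'identifier' is treated as a regular expression anchored at the start, so
+ # a plain string acts as a prefix match (append '$' for an exact match).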
+ if identifier is not None:
+ identifier_prog = re.compile('^' + identifier)
+
+ for cluster in clusters['Clusters']:
+
+ matched_identifier = True
+ if identifier:
+ matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
+
+ matched_tags = True
+ if tags:
+ matched_tags = match_tags(tags, cluster)
+
+ if matched_identifier and matched_tags:
+ matched_clusters.append(camel_dict_to_snake_dict(cluster))
+
+ return matched_clusters
+
+
+def main():
+
+ argument_spec = dict(
+ cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
+ tags=dict(type='dict')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'redshift_facts':
+ module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", date='2021-12-01', collection_name='community.aws')
+
+ cluster_identifier = module.params.get('cluster_identifier')
+ cluster_tags = module.params.get('tags')
+
+ redshift = module.client('redshift')
+
+ results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
new file mode 100644
index 00000000..be42fa2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift_subnet_group
+version_added: 1.0.0
+short_description: manage Redshift cluster subnet groups
+description:
+ - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+ state:
+ description:
+ - Specifies whether the subnet group should be present or absent.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ group_name:
+ description:
+ - Cluster subnet group name.
+ required: true
+ aliases: ['name']
+ type: str
+ group_description:
+ description:
+ - Cluster subnet group description.
+ aliases: ['description']
+ type: str
+ group_subnets:
+ description:
+ - List of subnet IDs that make up the cluster subnet group.
+ aliases: ['subnets']
+ type: list
+ elements: str
+requirements: [ 'boto' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+- name: Create a Redshift subnet group
+ community.aws.redshift_subnet_group:
+ state: present
+ group_name: redshift-subnet
+ group_description: Redshift subnet
+ group_subnets:
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
+
+- name: Remove subnet group
+ community.aws.redshift_subnet_group:
+ state: absent
+ group_name: redshift-subnet
+'''
+
+RETURN = r'''
+group:
+ description: dictionary containing all Redshift subnet group information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the Redshift subnet group
+ returned: success
+ type: str
+ sample: "redshift_subnet_group_name"
+ vpc_id:
+ description: Id of the VPC where the subnet is located
+ returned: success
+ type: str
+ sample: "vpc-aabb1122"
+'''
+
+try:
+ import boto
+ import boto.redshift
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ group_name=dict(required=True, aliases=['name']),
+ group_description=dict(required=False, aliases=['description']),
+ group_subnets=dict(required=False, aliases=['subnets'], type='list', elements='str'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto v2.9.0+ required for this module')
+
+ state = module.params.get('state')
+ group_name = module.params.get('group_name')
+ group_description = module.params.get('group_description')
+ group_subnets = module.params.get('group_subnets')
+
+ if state == 'present':
+ for required in ('group_name', 'group_description', 'group_subnets'):
+ if not module.params.get(required):
+ module.fail_json(msg=str("parameter %s required for state='present'" % required))
+ else:
+ for not_allowed in ('group_description', 'group_subnets'):
+ if module.params.get(not_allowed):
+ module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg=str("Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file"))
+
+ # Connect to the Redshift endpoint.
+ try:
+ conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ try:
+ changed = False
+ exists = False
+ group = None
+
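+        # Probe for an existing group first; a missing group surfaces as a
+        # JSONResponseError with code ClusterSubnetGroupNotFoundFault.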
+ try:
+ matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
+ exists = len(matching_groups) > 0
+ except boto.exception.JSONResponseError as e:
+ if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
+ module.fail_json(msg=str(e))
+
+ if state == 'absent':
+ if exists:
+ conn.delete_cluster_subnet_group(group_name)
+ changed = True
+
+ else:
+ if not exists:
+ new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
+ group = {
+ 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+ 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['VpcId'],
+ }
+ else:
+ changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
+ group = {
+ 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+ 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+ ['ClusterSubnetGroup']['VpcId'],
+ }
+
+ changed = True
+
+ except boto.exception.JSONResponseError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, group=group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53.py
new file mode 100644
index 00000000..6caf3850
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53.py
@@ -0,0 +1,708 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: route53
+version_added: 1.0.0
+short_description: Add or delete entries in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS records in Amazon's Route 53 service.
+options:
+ state:
+ description:
+      - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option was changed
+        to I(state), and the choices C(present) and C(absent) were added, but I(command) still works as well.
+ required: true
+ aliases: [ 'command' ]
+ choices: [ 'present', 'absent', 'get', 'create', 'delete' ]
+ type: str
+ zone:
+ description:
+ - The DNS zone to modify.
+ - This is a required parameter, if parameter I(hosted_zone_id) is not supplied.
+ type: str
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone to modify.
+ - This is a required parameter, if parameter I(zone) is not supplied.
+ type: str
+ record:
+ description:
+ - The full DNS record to create or delete.
+ required: true
+ type: str
+ ttl:
+ description:
+      - The TTL, in seconds, to give the new record.
+ default: 3600
+ type: int
+ type:
+ description:
+ - The type of DNS record to create.
+ required: true
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ]
+ type: str
+ alias:
+ description:
+ - Indicates if this is an alias record.
+ - Defaults to C(false).
+ type: bool
+ alias_hosted_zone_id:
+ description:
+ - The hosted zone identifier.
+ type: str
+ alias_evaluate_target_health:
+ description:
+ - Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
+ type: bool
+ default: false
+ value:
+ description:
+ - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records.
+      - When deleting a record, all values for the record must be specified, or Route 53 will not delete it.
+ type: list
+ elements: str
+ overwrite:
+ description:
+ - Whether an existing record should be overwritten on create if values do not match.
+ type: bool
+ retry_interval:
+ description:
+ - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds.
+ If you have many domain names, the default of C(500) seconds may be too long.
+ default: 500
+ type: int
+ private_zone:
+ description:
+ - If set to C(true), the private zone matching the requested name within the domain will be used if there are both public and private zones.
+ - The default is to use the public zone.
+ type: bool
+ default: false
+ identifier:
+ description:
+      - Required for weighted, latency-based, and failover resource record sets only.
+        An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+ type: str
+ weight:
+ description:
+ - Weighted resource record sets only. Among resource record sets that
+ have the same combination of DNS name and type, a value that
+ determines what portion of traffic for the current resource record set
+ is routed to the associated location.
+ type: int
+ region:
+ description:
+      - Latency-based resource record sets only. Among resource record sets
+        that have the same combination of DNS name and type, a value that
+        determines which region this should be associated with for
+        latency-based routing.
+ type: str
+ health_check:
+ description:
+      - Health check to associate with this record.
+    type: str
+  failover:
+    description:
+      - Failover resource record sets only. Whether this is the primary or
+        secondary resource record set. Allowed values are PRIMARY and SECONDARY.
+ type: str
+ choices: ['SECONDARY', 'PRIMARY']
+ vpc_id:
+ description:
+ - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
+ - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
+ type: str
+ wait:
+ description:
+ - Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long to wait for the changes to be replicated, in seconds.
+ default: 300
+ type: int
+author:
+- Bruce Pennypacker (@bpennypacker)
+- Mike Buzzetti (@jimbydamonk)
+extends_documentation_fragment:
+- amazon.aws.aws
+
+'''
+
+RETURN = r'''
+nameservers:
+ description: Nameservers associated with the zone.
+ returned: when state is 'get'
+ type: list
+ sample:
+ - ns-1036.awsdns-00.org.
+ - ns-516.awsdns-00.net.
+ - ns-1504.awsdns-00.co.uk.
+ - ns-1.awsdns-00.com.
+set:
+ description: Info specific to the resource record.
+ returned: when state is 'get'
+ type: complex
+ contains:
+ alias:
+ description: Whether this is an alias.
+ returned: always
+ type: bool
+ sample: false
+ failover:
+ description: Whether this is the primary or secondary resource record set.
+ returned: always
+ type: str
+ sample: PRIMARY
+ health_check:
+ description: health_check associated with this record.
+ returned: always
+ type: str
+ identifier:
+ description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+ returned: always
+ type: str
+ record:
+ description: Domain name for the record set.
+ returned: always
+ type: str
+ sample: new.foo.com.
+ region:
+ description: Which region this should be associated with for latency-based routing.
+ returned: always
+ type: str
+ sample: us-west-2
+ ttl:
+ description: Resource record cache TTL.
+ returned: always
+ type: str
+ sample: '3600'
+ type:
+ description: Resource record set type.
+ returned: always
+ type: str
+ sample: A
+ value:
+ description: Record value.
+ returned: always
+ type: str
+ sample: 52.43.18.27
+ values:
+      description: Record values.
+ returned: always
+ type: list
+ sample:
+ - 52.43.18.27
+ weight:
+ description: Weight of the record.
+ returned: always
+ type: str
+ sample: '3'
+ zone:
+ description: Zone this record set belongs to.
+ returned: always
+ type: str
+ sample: foo.bar.com.
+'''
+
+EXAMPLES = r'''
+- name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ ttl: 7200
+ value: 1.1.1.1,2.2.2.2,3.3.3.3
+ wait: yes
+
+- name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ ttl: 7200
+ value:
+ - 1.1.1.1
+ - 2.2.2.2
+ - 3.3.3.3
+ wait: yes
+
+- name: Retrieve the details for new.foo.com
+ community.aws.route53:
+ state: get
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ register: rec
+
+- name: Delete new.foo.com A record using the results from the get command
+ community.aws.route53:
+ state: absent
+ zone: foo.com
+ record: "{{ rec.set.record }}"
+ ttl: "{{ rec.set.ttl }}"
+ type: "{{ rec.set.type }}"
+ value: "{{ rec.set.value }}"
+
+# Add an AAAA record. Note that because there are colons in the value,
+# the IPv6 address must be quoted. Also shows using the old form command=create.
+- name: Add an AAAA record
+ community.aws.route53:
+ command: create
+ zone: foo.com
+ record: localhost.foo.com
+ type: AAAA
+ ttl: 7200
+ value: "::1"
+
+# For more information on SRV records see:
+# https://en.wikipedia.org/wiki/SRV_record
+- name: Add a SRV record with multiple fields for a service on port 22222
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: "_example-service._tcp.foo.com"
+ type: SRV
+ value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
+
+# Note that TXT and SPF records must be surrounded
+# by quotes when sent to Route 53:
+- name: Add a TXT record
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: localhost.foo.com
+ type: TXT
+ ttl: 7200
+ value: '"bar"'
+
+- name: Add an alias record that points to an Amazon ELB
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
+
+- name: Retrieve the details for elb.foo.com
+ community.aws.route53:
+ state: get
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ register: rec
+
+- name: Delete an alias record using the results from the get command
+ community.aws.route53:
+ state: absent
+ zone: foo.com
+ record: "{{ rec.set.record }}"
+ ttl: "{{ rec.set.ttl }}"
+ type: "{{ rec.set.type }}"
+ value: "{{ rec.set.value }}"
+ alias: True
+ alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
+
+- name: Add an alias record that points to an Amazon ELB and evaluates its health
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
+ alias_evaluate_target_health: True
+
+- name: Add an AAAA record with Hosted Zone ID
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ hosted_zone_id: Z2AABBCCDDEEFF
+ record: localhost.foo.com
+ type: AAAA
+ ttl: 7200
+ value: "::1"
+
+- name: Use a routing policy to distribute traffic
+ community.aws.route53:
+ state: present
+ zone: foo.com
+ record: www.foo.com
+ type: CNAME
+ value: host1.foo.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+
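+# A hedged sketch of a failover record set: per the I(identifier) docs,
+# failover records need an identifier. The health check ID and the IP
+# address below are placeholders.
+- name: Create a PRIMARY failover record for api.foo.com
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    record: api.foo.com
+    type: A
+    value: 52.43.18.27
+    ttl: 30
+    identifier: "primary-endpoint"
+    failover: PRIMARY
+    health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+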
+- name: Add a CAA record (RFC 6844)
+ community.aws.route53:
+ state: present
+ zone: example.com
+ record: example.com
+ type: CAA
+ value:
+ - 0 issue "ca.example.net"
+ - 0 issuewild ";"
+ - 0 iodef "mailto:security@example.com"
+
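+# A hedged sketch of the I(overwrite) flag: without it, creating a record that
+# already exists with a different value fails; with it, the change is sent as
+# an UPSERT. Names and addresses are placeholders.
+- name: Replace whatever value new.foo.com currently has
+  community.aws.route53:
+    state: present
+    overwrite: true
+    zone: foo.com
+    record: new.foo.com
+    type: A
+    ttl: 7200
+    value: 4.4.4.4
+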
+'''
+
+import time
+import distutils.version
+
+try:
+ import boto
+ import boto.ec2
+ from boto.route53 import Route53Connection
+ from boto.route53.record import Record, ResourceRecordSets
+ from boto.route53.status import Status
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+MINIMUM_BOTO_VERSION = '2.28.0'
+WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls
+
+
+class TimeoutError(Exception):
+ pass
+
+
+def get_zone_id_by_name(conn, module, zone_name, want_private, want_vpc_id):
+ """Finds a zone by name or zone_id"""
+ for zone in invoke_with_throttling_retries(conn.get_zones):
+ # only save this zone id if the private status of the zone matches
+ # the private_zone_in boolean specified in the params
+ private_zone = module.boolean(zone.config.get('PrivateZone', False))
+ if private_zone == want_private and zone.name == zone_name:
+ if want_vpc_id:
+ # NOTE: These details aren't available in other boto methods, hence the necessary
+ # extra API call
+ hosted_zone = invoke_with_throttling_retries(conn.get_hosted_zone, zone.id)
+ zone_details = hosted_zone['GetHostedZoneResponse']
+ # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
+ if isinstance(zone_details['VPCs'], dict):
+ if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
+ return zone.id
+ else: # Forward compatibility for when boto fixes that bug
+ if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
+ return zone.id
+ else:
+ return zone.id
+ return None
+
+
+def commit(changes, retry_interval, wait, wait_timeout):
+ """Commit changes, but retry PriorRequestNotComplete errors."""
+ result = None
+ retry = 10
+ while True:
+ try:
+ retry -= 1
+ result = changes.commit()
+ break
+ except boto.route53.exception.DNSServerError as e:
+ code = e.body.split("<Code>")[1]
+ code = code.split("</Code>")[0]
+ if code != 'PriorRequestNotComplete' or retry < 0:
+ raise e
+ time.sleep(float(retry_interval))
+
+ if wait:
+ timeout_time = time.time() + wait_timeout
+ connection = changes.connection
+ change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
+ status = Status(connection, change)
+ while status.status != 'INSYNC' and time.time() < timeout_time:
+ time.sleep(WAIT_RETRY_SLEEP)
+ status.update()
+ if time.time() >= timeout_time:
+ raise TimeoutError()
+ return result
+
+
+# Shamelessly copied over from https://git.io/vgmDG
+IGNORE_CODE = 'Throttling'
+MAX_RETRIES = 5
+
+
+def invoke_with_throttling_retries(function_ref, *argv, **kwargs):
+ retries = 0
+ while True:
+ try:
+ retval = function_ref(*argv, **kwargs)
+ return retval
+ except boto.exception.BotoServerError as e:
+ if e.code != IGNORE_CODE or retries == MAX_RETRIES:
+ raise e
+ time.sleep(5 * (2**retries))
+ retries += 1
+
+
+def decode_name(name):
+ # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
+ # tripping of things like * and @.
+ return name.encode().decode('unicode_escape')
+
+
+def to_dict(rset, zone_in, zone_id):
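+    """Convert a boto record set into the flat dict this module returns."""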
+ record = dict()
+ record['zone'] = zone_in
+ record['type'] = rset.type
+ record['record'] = decode_name(rset.name)
+ record['ttl'] = str(rset.ttl)
+ record['identifier'] = rset.identifier
+ record['weight'] = rset.weight
+ record['region'] = rset.region
+ record['failover'] = rset.failover
+ record['health_check'] = rset.health_check
+ record['hosted_zone_id'] = zone_id
+ if rset.alias_dns_name:
+ record['alias'] = True
+ record['value'] = rset.alias_dns_name
+ record['values'] = [rset.alias_dns_name]
+ record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
+ record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
+ else:
+ record['alias'] = False
+ record['value'] = ','.join(sorted(rset.resource_records))
+ record['values'] = sorted(rset.resource_records)
+ return record
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
+ zone=dict(type='str'),
+ hosted_zone_id=dict(type='str'),
+ record=dict(type='str', required=True),
+ ttl=dict(type='int', default=3600),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
+ alias=dict(type='bool'),
+ alias_hosted_zone_id=dict(type='str'),
+ alias_evaluate_target_health=dict(type='bool', default=False),
+ value=dict(type='list', elements='str'),
+ overwrite=dict(type='bool'),
+ retry_interval=dict(type='int', default=500),
+ private_zone=dict(type='bool', default=False),
+ identifier=dict(type='str'),
+ weight=dict(type='int'),
+ region=dict(type='str'),
+ health_check=dict(type='str'),
+ failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
+ vpc_id=dict(type='str'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['zone', 'hosted_zone_id']],
+ # If alias is True then you must specify alias_hosted_zone as well
+ required_together=[['alias', 'alias_hosted_zone_id']],
+ # state=present, absent, create, delete THEN value is required
+ required_if=(
+ ('state', 'present', ['value']),
+ ('state', 'create', ['value']),
+ ('state', 'absent', ['value']),
+ ('state', 'delete', ['value']),
+ ),
+ # failover, region and weight are mutually exclusive
+ mutually_exclusive=[('failover', 'region', 'weight')],
+ # failover, region and weight require identifier
+ required_by=dict(
+ failover=('identifier',),
+ region=('identifier',),
+ weight=('identifier',),
+ ),
+ check_boto3=False,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
+ module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))
+
+ if module.params['state'] in ('present', 'create'):
+ command_in = 'create'
+ elif module.params['state'] in ('absent', 'delete'):
+ command_in = 'delete'
+ elif module.params['state'] == 'get':
+ command_in = 'get'
+
+ zone_in = (module.params.get('zone') or '').lower()
+ hosted_zone_id_in = module.params.get('hosted_zone_id')
+ ttl_in = module.params.get('ttl')
+ record_in = module.params.get('record').lower()
+ type_in = module.params.get('type')
+ value_in = module.params.get('value') or []
+ alias_in = module.params.get('alias')
+ alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
+ alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
+ retry_interval_in = module.params.get('retry_interval')
+
+ if module.params['vpc_id'] is not None:
+ private_zone_in = True
+ else:
+ private_zone_in = module.params.get('private_zone')
+
+ identifier_in = module.params.get('identifier')
+ weight_in = module.params.get('weight')
+ region_in = module.params.get('region')
+ health_check_in = module.params.get('health_check')
+ failover_in = module.params.get('failover')
+ vpc_id_in = module.params.get('vpc_id')
+ wait_in = module.params.get('wait')
+ wait_timeout_in = module.params.get('wait_timeout')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ if zone_in[-1:] != '.':
+ zone_in += "."
+
+ if record_in[-1:] != '.':
+ record_in += "."
+
+ if command_in == 'create' or command_in == 'delete':
+ if alias_in and len(value_in) != 1:
+ module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
+ if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
+ module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
+
+ # connect to the route53 endpoint
+ try:
+ conn = Route53Connection(**aws_connect_kwargs)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ # Find the named zone ID
+ zone_id = hosted_zone_id_in or get_zone_id_by_name(conn, module, zone_in, private_zone_in, vpc_id_in)
+
+ # Verify that the requested zone is already defined in Route53
+ if zone_id is None:
+ errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
+ module.fail_json(msg=errmsg)
+
+ record = {}
+
+ found_record = False
+ wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
+ identifier=identifier_in, weight=weight_in,
+ region=region_in, health_check=health_check_in,
+ failover=failover_in)
+ for v in value_in:
+ if alias_in:
+ wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
+ else:
+ wanted_rset.add_value(v)
+
+ need_to_sort_records = (type_in == 'CAA')
+
+ # Sort records for wanted_rset if necessary (keep original list)
+ unsorted_records = wanted_rset.resource_records
+ if need_to_sort_records:
+ wanted_rset.resource_records = sorted(unsorted_records)
+
+ sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone_id, name=record_in,
+ type=type_in, identifier=identifier_in)
+ sets_iter = iter(sets)
+ while True:
+ try:
+ rset = invoke_with_throttling_retries(next, sets_iter)
+ except StopIteration:
+ break
+        # Save the decoded name back into rset, because the next block compares rset.to_xml() == wanted_rset.to_xml()
+ rset.name = decode_name(rset.name)
+
+ if identifier_in is not None:
+ identifier_in = str(identifier_in)
+
+ if rset.type == type_in and rset.name.lower() == record_in.lower() and rset.identifier == identifier_in:
+ if need_to_sort_records:
+ # Sort records
+ rset.resource_records = sorted(rset.resource_records)
+ found_record = True
+ record = to_dict(rset, zone_in, zone_id)
+ if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
+ module.exit_json(changed=False)
+
+ # We need to look only at the first rrset returned by the above call,
+ # so break here. The returned elements begin with the one matching our
+ # requested name, type, and identifier, if such an element exists,
+ # followed by all others that come after it in alphabetical order.
+ # Therefore, if the first set does not match, no subsequent set will
+ # match either.
+ break
+
+ if command_in == 'get':
+ if type_in == 'NS':
+ ns = record.get('values', [])
+ else:
+            # Retrieve name servers associated with the zone.
+ z = invoke_with_throttling_retries(conn.get_zone, zone_in)
+ ns = invoke_with_throttling_retries(z.get_nameservers)
+
+ module.exit_json(changed=False, set=record, nameservers=ns)
+
+ if command_in == 'delete' and not found_record:
+ module.exit_json(changed=False)
+
+ changes = ResourceRecordSets(conn, zone_id)
+
+ if command_in == 'create' or command_in == 'delete':
+ if command_in == 'create' and found_record:
+ if not module.params['overwrite']:
+ module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
+ command = 'UPSERT'
+ else:
+ command = command_in.upper()
+ # Restore original order of records
+ wanted_rset.resource_records = unsorted_records
+ changes.add_change_record(command, wanted_rset)
+
+ if not module.check_mode:
+ try:
+ invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
+ except boto.route53.exception.DNSServerError as e:
+ txt = e.body.split("<Message>")[1]
+ txt = txt.split("</Message>")[0]
+ if "but it already exists" in txt:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg=txt)
+ except TimeoutError:
+ module.fail_json(msg='Timeout waiting for changes to replicate')
+
+ module.exit_json(
+ changed=True,
+ diff=dict(
+ before=record,
+ after=to_dict(wanted_rset, zone_in, zone_id) if command != 'delete' else {},
+ ),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_facts.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_facts.py
new file mode 100644
index 00000000..38d0bc54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_facts.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: route53_info
+short_description: Retrieves Route 53 details using AWS methods
+version_added: 1.0.0
+description:
+    - Gets various details related to Route 53 zones, record sets, or health checks.
+ - This module was called C(route53_facts) before Ansible 2.9. The usage did not change.
+options:
+ query:
+ description:
+ - Specifies the query action to take.
+ required: True
+ choices: [
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ]
+ type: str
+ change_id:
+ description:
+ - The ID of the change batch request.
+ - The value that you specify here is the value that
+ ChangeResourceRecordSets returned in the Id element
+ when you submitted the request.
+ - Required if I(query=change).
+ required: false
+ type: str
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone.
+ - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details).
+ - Required if I(query) is set to I(record_sets).
+ required: false
+ type: str
+ max_items:
+ description:
+ - Maximum number of items to return for various get/list requests.
+ required: false
+ type: str
+ next_marker:
+ description:
+ - "Some requests such as list_command: hosted_zones will return a maximum
+ number of entries - EG 100 or the number specified by I(max_items).
+ If the number of entries exceeds this maximum another request can be sent
+ using the NextMarker entry from the first response to get the next page
+ of results."
+ required: false
+ type: str
+ delegation_set_id:
+ description:
+ - The DNS Zone delegation set ID.
+ required: false
+ type: str
+ start_record_name:
+ description:
+ - "The first name in the lexicographic ordering of domain names that you want
+ the list_command: record_sets to start listing from."
+ required: false
+ type: str
+ type:
+ description:
+ - The type of DNS record.
+ required: false
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ]
+ type: str
+ dns_name:
+ description:
+        - The first name in the lexicographic ordering of domain names that you want
+          the listing to start from.
+ required: false
+ type: str
+ resource_id:
+ description:
+        - The ID(s) of the specified resource(s).
+ - Required if I(query=health_check) and I(health_check_method=tags).
+ - Required if I(query=hosted_zone) and I(hosted_zone_method=tags).
+ required: false
+ aliases: ['resource_ids']
+ type: list
+ elements: str
+ health_check_id:
+ description:
+ - The ID of the health check.
+ - Required if C(query) is set to C(health_check) and
+ C(health_check_method) is set to C(details) or C(status) or C(failure_reason).
+ required: false
+ type: str
+ hosted_zone_method:
+ description:
+ - "This is used in conjunction with query: hosted_zone.
+ It allows for listing details, counts or tags of various
+ hosted zone details."
+ required: false
+ choices: [
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+ health_check_method:
+ description:
+ - "This is used in conjunction with query: health_check.
+ It allows for listing details, counts or tags of various
+ health check details."
+ required: false
+ choices: [
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all hosted zones
+- name: List all hosted zones
+ community.aws.route53_info:
+ query: hosted_zone
+ register: hosted_zones
+
+# Getting a count of hosted zones
+- name: Return a count of all hosted zones
+ community.aws.route53_info:
+ query: hosted_zone
+ hosted_zone_method: count
+ register: hosted_zone_count
+
+- name: List the first 20 resource record sets in a given hosted zone
+ community.aws.route53_info:
+ profile: account_name
+ query: record_sets
+ hosted_zone_id: ZZZ1111112222
+ max_items: 20
+ register: record_sets
+
+- name: List first 20 health checks
+ community.aws.route53_info:
+ query: health_check
+ health_check_method: list
+ max_items: 20
+ register: health_checks
+
+- name: Get health check last failure_reason
+ community.aws.route53_info:
+ query: health_check
+ health_check_method: failure_reason
+ health_check_id: 00000000-1111-2222-3333-12345678abcd
+ register: health_check_failure_reason
+
+- name: Retrieve reusable delegation set details
+ community.aws.route53_info:
+ query: reusable_delegation_set
+ delegation_set_id: delegation id
+ register: delegation_sets
+
+- name: setup of example for using next_marker
+ community.aws.route53_info:
+ query: hosted_zone
+ max_items: 1
+ register: first_info
+
+- name: example for using next_marker
+ community.aws.route53_info:
+ query: hosted_zone
+ next_marker: "{{ first_info.NextMarker }}"
+ max_items: 1
+ when: "{{ 'NextMarker' in first_info }}"
+
+- name: retrieve host entries starting with host1.workshop.test.io
+ block:
+ - name: grab zone id
+ community.aws.route53_zone:
+ zone: "test.io"
+ register: AWSINFO
+
+ - name: grab Route53 record information
+ community.aws.route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: "{{ AWSINFO.zone_id }}"
+ start_record_name: "host1.workshop.test.io"
+ register: RECORDS
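+
+# A hedged example of the tags query: the zone ID is a placeholder, and per
+# the option docs resource_id is required when query=hosted_zone and
+# hosted_zone_method=tags.
+- name: List tags attached to a hosted zone
+  community.aws.route53_info:
+    query: hosted_zone
+    hosted_zone_method: tags
+    resource_id: Z2AABBCCDDEEFF
+  register: zone_tags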
+'''
+try:
+ import boto
+ import botocore
+ import boto3
+except ImportError:
+ pass # Handled by HAS_BOTO and HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+
+def get_hosted_zone(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['Id'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ return client.get_hosted_zone(**params)
+
+
+def reusable_delegation_set_details(client, module):
+ params = dict()
+ if not module.params.get('delegation_set_id'):
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ results = client.list_reusable_delegation_sets(**params)
+ else:
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+ results = client.get_reusable_delegation_set(**params)
+
+ return results
+
+
+def list_hosted_zones(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ if module.params.get('delegation_set_id'):
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+
+ paginator = client.get_paginator('list_hosted_zones')
+ zones = paginator.paginate(**params).build_full_result()['HostedZones']
+ return {
+ "HostedZones": zones,
+ "list": zones,
+ }
+
+
+def list_hosted_zones_by_name(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+
+ if module.params.get('dns_name'):
+ params['DNSName'] = module.params.get('dns_name')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ return client.list_hosted_zones_by_name(**params)
+
+
+def change_details(client, module):
+ params = dict()
+
+ if module.params.get('change_id'):
+ params['Id'] = module.params.get('change_id')
+ else:
+ module.fail_json(msg="change_id is required")
+
+ results = client.get_change(**params)
+ return results
+
+
+def checker_ip_range_details(client, module):
+ return client.get_checker_ip_ranges()
+
+
+def get_count(client, module):
+ if module.params.get('query') == 'health_check':
+ results = client.get_health_check_count()
+ else:
+ results = client.get_hosted_zone_count()
+
+ return results
+
+
+def get_health_check(client, module):
+ params = dict()
+
+ if not module.params.get('health_check_id'):
+ module.fail_json(msg="health_check_id is required")
+ else:
+ params['HealthCheckId'] = module.params.get('health_check_id')
+
+ if module.params.get('health_check_method') == 'details':
+ results = client.get_health_check(**params)
+ elif module.params.get('health_check_method') == 'failure_reason':
+ results = client.get_health_check_last_failure_reason(**params)
+ elif module.params.get('health_check_method') == 'status':
+ results = client.get_health_check_status(**params)
+
+ return results
+
+
+def get_resource_tags(client, module):
+ params = dict()
+
+ if module.params.get('resource_id'):
+ params['ResourceIds'] = module.params.get('resource_id')
+ else:
+ module.fail_json(msg="resource_id or resource_ids is required")
+
+ if module.params.get('query') == 'health_check':
+ params['ResourceType'] = 'healthcheck'
+ else:
+ params['ResourceType'] = 'hostedzone'
+
+ return client.list_tags_for_resources(**params)
+
+
+def list_health_checks(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ paginator = client.get_paginator('list_health_checks')
+ health_checks = paginator.paginate(**params).build_full_result()['HealthChecks']
+ return {
+ "HealthChecks": health_checks,
+ "list": health_checks,
+ }
+
+
+def record_sets_details(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('start_record_name'):
+ params['StartRecordName'] = module.params.get('start_record_name')
+
+ if module.params.get('type') and not module.params.get('start_record_name'):
+ module.fail_json(msg="start_record_name must be specified if type is set")
+ elif module.params.get('type'):
+ params['StartRecordType'] = module.params.get('type')
+
+ paginator = client.get_paginator('list_resource_record_sets')
+ record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets']
+ return {
+ "ResourceRecordSets": record_sets,
+ "list": record_sets,
+ }
+
+
+def health_check_details(client, module):
+ health_check_invocations = {
+ 'list': list_health_checks,
+ 'details': get_health_check,
+ 'status': get_health_check,
+ 'failure_reason': get_health_check,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = health_check_invocations[module.params.get('health_check_method')](client, module)
+ return results
+
+
+def hosted_zone_details(client, module):
+ hosted_zone_invocations = {
+ 'details': get_hosted_zone,
+ 'list': list_hosted_zones,
+ 'list_by_name': list_hosted_zones_by_name,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ query=dict(choices=[
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ], required=True),
+ change_id=dict(),
+ hosted_zone_id=dict(),
+ max_items=dict(),
+ next_marker=dict(),
+ delegation_set_id=dict(),
+ start_record_name=dict(),
+ type=dict(choices=[
+ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
+ ]),
+ dns_name=dict(),
+ resource_id=dict(type='list', aliases=['resource_ids'], elements='str'),
+ health_check_id=dict(),
+ hosted_zone_method=dict(choices=[
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags'
+ ], default='list'),
+ health_check_method=dict(choices=[
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ], default='list'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['hosted_zone_method', 'health_check_method'],
+ ],
+ check_boto3=False,
+ )
+ if module._name == 'route53_facts':
+ module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ if not (HAS_BOTO or HAS_BOTO3):
+        module.fail_json(msg='boto or boto3 is required.')
+
+ try:
+ route53 = module.client('route53')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invocations = {
+ 'change': change_details,
+ 'checker_ip_range': checker_ip_range_details,
+ 'health_check': health_check_details,
+ 'hosted_zone': hosted_zone_details,
+ 'record_sets': record_sets_details,
+ 'reusable_delegation_set': reusable_delegation_set_details,
+ }
+
+ results = dict(changed=False)
+ try:
+ results = invocations[module.params.get('query')](route53, module)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_health_check.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_health_check.py
new file mode 100644
index 00000000..a1f9c9a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_health_check.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+version_added: 1.0.0
+short_description: Add or delete health-checks in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS health checks in Amazon's Route 53 service.
+ - Only the port, resource_path, string_match and request_interval are
+ considered when updating existing health-checks.
+options:
+ state:
+ description:
+ - Specifies the action to take.
+ choices: [ 'present', 'absent' ]
+ type: str
+ default: 'present'
+ ip_address:
+ description:
+ - IP address of the end-point to check. Either this or I(fqdn) has to be provided.
+ type: str
+ port:
+ description:
+ - The port on the endpoint on which you want Amazon Route 53 to perform
+ health checks. Required for TCP checks.
+ type: int
+ type:
+ description:
+ - The type of health check that you want to create, which indicates how
+ Amazon Route 53 determines whether an endpoint is healthy.
+ required: true
+ choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+ type: str
+ resource_path:
+ description:
+ - The path that you want Amazon Route 53 to request when performing
+ health checks. The path can be any value for which your endpoint will
+ return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+ for example the file /docs/route53-health-check.html.
+ - Required for all checks except TCP.
+ - The path must begin with a /
+ - Maximum 255 characters.
+ type: str
+ fqdn:
+ description:
+      - Domain name of the endpoint to check. Either this or I(ip_address) has
+        to be provided. When both are given, the I(fqdn) is used in the C(Host:)
+        header of the HTTP request.
+ type: str
+ string_match:
+ description:
+      - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+ that you want Amazon Route 53 to search for in the response body from
+ the specified resource. If the string appears in the first 5120 bytes
+ of the response body, Amazon Route 53 considers the resource healthy.
+ type: str
+ request_interval:
+ description:
+ - The number of seconds between the time that Amazon Route 53 gets a
+ response from your endpoint and the time that it sends the next
+ health-check request.
+ default: 30
+ choices: [ 10, 30 ]
+ type: int
+ failure_threshold:
+ description:
+ - The number of consecutive health checks that an endpoint must pass or
+ fail for Amazon Route 53 to change the current status of the endpoint
+ from unhealthy to healthy or vice versa.
+ default: 3
+ choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+ type: int
+author: "zimbatm (@zimbatm)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create a health-check for host1.example.com and use it in a record
+ community.aws.route53_health_check:
+ state: present
+ fqdn: host1.example.com
+ type: HTTP_STR_MATCH
+ resource_path: /
+ string_match: "Hello"
+ request_interval: 10
+ failure_threshold: 2
+ register: my_health_check
+
+- community.aws.route53:
+    command: create
+ zone: "example.com"
+ type: CNAME
+ record: "www.example.com"
+ value: host1.example.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "{{ my_health_check.health_check.id }}"
+
+- name: Delete health-check
+ community.aws.route53_health_check:
+ state: absent
+ fqdn: host1.example.com
+
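+# A hedged sketch of a TCP check: per the option docs, I(port) is required
+# for TCP checks and I(resource_path) does not apply. The IP address is a
+# placeholder from the documentation range.
+- name: Create a plain TCP health-check on port 5432
+  community.aws.route53_health_check:
+    state: present
+    ip_address: 192.0.2.10
+    port: 5432
+    type: TCP
+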
+'''
+
+import uuid
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection, exception
+ from boto.route53.healthcheck import HealthCheck
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+# Things that can't get changed:
+# protocol
+# ip_address or domain
+# request_interval
+# string_match if not previously enabled
+def find_health_check(conn, wanted):
+ """Searches for health checks that have the exact same set of immutable values"""
+
+ results = conn.get_list_health_checks()
+
+ while True:
+ for check in results.HealthChecks:
+ config = check.HealthCheckConfig
+ if (
+ config.get('IPAddress') == wanted.ip_addr and
+ config.get('FullyQualifiedDomainName') == wanted.fqdn and
+ config.get('Type') == wanted.hc_type and
+ config.get('RequestInterval') == str(wanted.request_interval) and
+ config.get('Port') == str(wanted.port)
+ ):
+ return check
+
+ if (results.IsTruncated == 'true'):
+ results = conn.get_list_health_checks(marker=results.NextMarker)
+ else:
+ return None
+
+
+def to_health_check(config):
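+    """Build a boto HealthCheck from a parsed HealthCheckConfig mapping."""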
+ return HealthCheck(
+ config.get('IPAddress'),
+ int(config.get('Port')),
+ config.get('Type'),
+ config.get('ResourcePath'),
+ fqdn=config.get('FullyQualifiedDomainName'),
+ string_match=config.get('SearchString'),
+ request_interval=int(config.get('RequestInterval')),
+ failure_threshold=int(config.get('FailureThreshold')),
+ )
+
+
+def health_check_diff(a, b):
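+    """Return the fields (with b's values) in which health check b differs from a."""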
+ a = a.__dict__
+ b = b.__dict__
+ if a == b:
+ return {}
+ diff = {}
+ for key in set(a.keys()) | set(b.keys()):
+ if a.get(key) != b.get(key):
+ diff[key] = b.get(key)
+ return diff
+
+
+def to_template_params(health_check):
+ params = {
+ 'ip_addr_part': '',
+ 'port': health_check.port,
+ 'type': health_check.hc_type,
+ 'resource_path_part': '',
+ 'fqdn_part': '',
+ 'string_match_part': '',
+ 'request_interval': health_check.request_interval,
+ 'failure_threshold': health_check.failure_threshold,
+ }
+ if health_check.ip_addr:
+ params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
+ if health_check.resource_path:
+ params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
+ if health_check.fqdn:
+ params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
+ if health_check.string_match:
+ params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
+ return params
+
+
+XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
+
+POSTXMLBody = """
+ <CreateHealthCheckRequest xmlns="%(xmlns)s">
+ <CallerReference>%(caller_ref)s</CallerReference>
+ <HealthCheckConfig>
+ %(ip_addr_part)s
+ <Port>%(port)s</Port>
+ <Type>%(type)s</Type>
+ %(resource_path_part)s
+ %(fqdn_part)s
+ %(string_match_part)s
+ <RequestInterval>%(request_interval)s</RequestInterval>
+ <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+ </HealthCheckConfig>
+ </CreateHealthCheckRequest>
+ """
+
+UPDATEHCXMLBody = """
+ <UpdateHealthCheckRequest xmlns="%(xmlns)s">
+ <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
+ %(ip_addr_part)s
+ <Port>%(port)s</Port>
+ %(resource_path_part)s
+ %(fqdn_part)s
+ %(string_match_part)s
+ <FailureThreshold>%(failure_threshold)i</FailureThreshold>
+ </UpdateHealthCheckRequest>
+ """
+
+
+def create_health_check(conn, health_check, caller_ref=None):
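+    """POST a CreateHealthCheckRequest; caller_ref defaults to a fresh UUID."""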
+ if caller_ref is None:
+ caller_ref = str(uuid.uuid4())
+ uri = '/%s/healthcheck' % conn.Version
+ params = to_template_params(health_check)
+ params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
+
+ xml_body = POSTXMLBody % params
+ response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 201:
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+ else:
+ raise exception.DNSServerError(response.status, response.reason, body)
+
+
+def update_health_check(conn, health_check_id, health_check_version, health_check):
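+    """POST an UpdateHealthCheckRequest for the given check ID and version."""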
+ uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
+ params = to_template_params(health_check)
+ params.update(
+ xmlns=conn.XMLNameSpace,
+ health_check_version=health_check_version,
+ )
+ xml_body = UPDATEHCXMLBody % params
+ response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status not in (200, 204):
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ ip_address=dict(),
+ port=dict(type='int'),
+ type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+ resource_path=dict(),
+ fqdn=dict(),
+ string_match=dict(),
+ request_interval=dict(type='int', choices=[10, 30], default=30),
+ failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.27.0+ required for this module')
+
+ state_in = module.params.get('state')
+ ip_addr_in = module.params.get('ip_address')
+ port_in = module.params.get('port')
+ type_in = module.params.get('type')
+ resource_path_in = module.params.get('resource_path')
+ fqdn_in = module.params.get('fqdn')
+ string_match_in = module.params.get('string_match')
+ request_interval_in = module.params.get('request_interval')
+ failure_threshold_in = module.params.get('failure_threshold')
+
+ if ip_addr_in is None and fqdn_in is None:
+ module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
+
+ # Default port
+ if port_in is None:
+ if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+ port_in = 80
+ elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
+ port_in = 443
+ else:
+ module.fail_json(msg="parameter 'port' is required for 'type' TCP")
+
+ # string_match in relation with type
+ if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+ if string_match_in is None:
+ module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
+ elif len(string_match_in) > 255:
+ module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
+ elif string_match_in:
+ module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ # connect to the route53 endpoint
+ try:
+ conn = Route53Connection(**aws_connect_kwargs)
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg=e.error_message)
+
+ changed = False
+ action = None
+ check_id = None
+ wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
+ existing_check = find_health_check(conn, wanted_config)
+ if existing_check:
+ check_id = existing_check.Id
+ existing_config = to_health_check(existing_check.HealthCheckConfig)
+
+ if state_in == 'present':
+ if existing_check is None:
+ action = "create"
+ check_id = create_health_check(conn, wanted_config).HealthCheck.Id
+ changed = True
+ else:
+ diff = health_check_diff(existing_config, wanted_config)
+ if diff:
+ action = "update"
+ update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
+ changed = True
+ elif state_in == 'absent':
+ if check_id:
+ action = "delete"
+ conn.delete_health_check(check_id)
+ changed = True
+ else:
+ module.fail_json(msg="Logic Error: Unknown state")
+
+ module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_info.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_info.py
new file mode 100644
index 00000000..38d0bc54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_info.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: route53_info
+short_description: Retrieves Route 53 details using AWS methods
+version_added: 1.0.0
+description:
+    - Gets various details related to Route 53 zones, record sets, or health checks.
+ - This module was called C(route53_facts) before Ansible 2.9. The usage did not change.
+options:
+ query:
+ description:
+ - Specifies the query action to take.
+ required: True
+ choices: [
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ]
+ type: str
+ change_id:
+ description:
+ - The ID of the change batch request.
+ - The value that you specify here is the value that
+ ChangeResourceRecordSets returned in the Id element
+ when you submitted the request.
+ - Required if I(query=change).
+ required: false
+ type: str
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone.
+ - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details).
+ - Required if I(query) is set to I(record_sets).
+ required: false
+ type: str
+ max_items:
+ description:
+ - Maximum number of items to return for various get/list requests.
+ required: false
+ type: str
+ next_marker:
+ description:
+ - "Some requests such as list_command: hosted_zones will return a maximum
+ number of entries - EG 100 or the number specified by I(max_items).
+ If the number of entries exceeds this maximum another request can be sent
+ using the NextMarker entry from the first response to get the next page
+ of results."
+ required: false
+ type: str
+ delegation_set_id:
+ description:
+ - The DNS Zone delegation set ID.
+ required: false
+ type: str
+ start_record_name:
+ description:
+ - "The first name in the lexicographic ordering of domain names that you want
+ the list_command: record_sets to start listing from."
+ required: false
+ type: str
+ type:
+ description:
+ - The type of DNS record.
+ required: false
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ]
+ type: str
+ dns_name:
+ description:
+        - The first name in the lexicographic ordering of domain names that you want
+          the listing to start from.
+ required: false
+ type: str
+ resource_id:
+ description:
+        - The ID(s) of the specified resource(s).
+ - Required if I(query=health_check) and I(health_check_method=tags).
+ - Required if I(query=hosted_zone) and I(hosted_zone_method=tags).
+ required: false
+ aliases: ['resource_ids']
+ type: list
+ elements: str
+ health_check_id:
+ description:
+ - The ID of the health check.
+ - Required if C(query) is set to C(health_check) and
+ C(health_check_method) is set to C(details) or C(status) or C(failure_reason).
+ required: false
+ type: str
+ hosted_zone_method:
+ description:
+ - "This is used in conjunction with query: hosted_zone.
+ It allows for listing details, counts or tags of various
+ hosted zone details."
+ required: false
+ choices: [
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+ health_check_method:
+ description:
+ - "This is used in conjunction with query: health_check.
+ It allows for listing details, counts or tags of various
+ health check details."
+ required: false
+ choices: [
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all hosted zones
+- name: List all hosted zones
+ community.aws.route53_info:
+ query: hosted_zone
+ register: hosted_zones
+
+# Getting a count of hosted zones
+- name: Return a count of all hosted zones
+ community.aws.route53_info:
+ query: hosted_zone
+ hosted_zone_method: count
+ register: hosted_zone_count
+
+- name: List the first 20 resource record sets in a given hosted zone
+ community.aws.route53_info:
+ profile: account_name
+ query: record_sets
+ hosted_zone_id: ZZZ1111112222
+ max_items: 20
+ register: record_sets
+
+- name: List first 20 health checks
+ community.aws.route53_info:
+ query: health_check
+ health_check_method: list
+ max_items: 20
+ register: health_checks
+
+- name: Get health check last failure_reason
+ community.aws.route53_info:
+ query: health_check
+ health_check_method: failure_reason
+ health_check_id: 00000000-1111-2222-3333-12345678abcd
+ register: health_check_failure_reason
+
+- name: Retrieve reusable delegation set details
+ community.aws.route53_info:
+ query: reusable_delegation_set
+ delegation_set_id: delegation id
+ register: delegation_sets
+
+- name: setup of example for using next_marker
+ community.aws.route53_info:
+ query: hosted_zone
+ max_items: 1
+ register: first_info
+
+- name: example for using next_marker
+ community.aws.route53_info:
+ query: hosted_zone
+ next_marker: "{{ first_info.NextMarker }}"
+ max_items: 1
+ when: "{{ 'NextMarker' in first_info }}"
+
+- name: retrieve host entries starting with host1.workshop.test.io
+ block:
+ - name: grab zone id
+ community.aws.route53_zone:
+ zone: "test.io"
+ register: AWSINFO
+
+ - name: grab Route53 record information
+ community.aws.route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: "{{ AWSINFO.zone_id }}"
+ start_record_name: "host1.workshop.test.io"
+ register: RECORDS
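+
+# A hedged example of the tags query: the zone ID is a placeholder, and per
+# the option docs resource_id is required when query=hosted_zone and
+# hosted_zone_method=tags.
+- name: List tags attached to a hosted zone
+  community.aws.route53_info:
+    query: hosted_zone
+    hosted_zone_method: tags
+    resource_id: Z2AABBCCDDEEFF
+  register: zone_tags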
+'''
+try:
+ import boto
+ import botocore
+ import boto3
+except ImportError:
+ pass # Handled by HAS_BOTO and HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+
+def get_hosted_zone(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['Id'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ return client.get_hosted_zone(**params)
+
+
+def reusable_delegation_set_details(client, module):
+ params = dict()
+ if not module.params.get('delegation_set_id'):
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ results = client.list_reusable_delegation_sets(**params)
+ else:
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+ results = client.get_reusable_delegation_set(**params)
+
+ return results
+
+
+def list_hosted_zones(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ if module.params.get('delegation_set_id'):
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+
+ paginator = client.get_paginator('list_hosted_zones')
+ zones = paginator.paginate(**params).build_full_result()['HostedZones']
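+ # Expose the zones under both the boto3-style 'HostedZones' key and the
+ # flat 'list' key, so callers expecting either shape keep working.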
+ return {
+ "HostedZones": zones,
+ "list": zones,
+ }
+
+
+def list_hosted_zones_by_name(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+
+ if module.params.get('dns_name'):
+ params['DNSName'] = module.params.get('dns_name')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ return client.list_hosted_zones_by_name(**params)
+
+
+def change_details(client, module):
+ params = dict()
+
+ if module.params.get('change_id'):
+ params['Id'] = module.params.get('change_id')
+ else:
+ module.fail_json(msg="change_id is required")
+
+ results = client.get_change(**params)
+ return results
+
+
+def checker_ip_range_details(client, module):
+ return client.get_checker_ip_ranges()
+
+
+def get_count(client, module):
+ if module.params.get('query') == 'health_check':
+ results = client.get_health_check_count()
+ else:
+ results = client.get_hosted_zone_count()
+
+ return results
+
+
+def get_health_check(client, module):
+ params = dict()
+
+ if not module.params.get('health_check_id'):
+ module.fail_json(msg="health_check_id is required")
+ else:
+ params['HealthCheckId'] = module.params.get('health_check_id')
+
+ if module.params.get('health_check_method') == 'details':
+ results = client.get_health_check(**params)
+ elif module.params.get('health_check_method') == 'failure_reason':
+ results = client.get_health_check_last_failure_reason(**params)
+ elif module.params.get('health_check_method') == 'status':
+ results = client.get_health_check_status(**params)
+
+ return results
+
+
+def get_resource_tags(client, module):
+ params = dict()
+
+ if module.params.get('resource_id'):
+ params['ResourceIds'] = module.params.get('resource_id')
+ else:
+ module.fail_json(msg="resource_id or resource_ids is required")
+
+ if module.params.get('query') == 'health_check':
+ params['ResourceType'] = 'healthcheck'
+ else:
+ params['ResourceType'] = 'hostedzone'
+
+ return client.list_tags_for_resources(**params)
+
+
+def list_health_checks(client, module):
+ params = dict()
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ paginator = client.get_paginator('list_health_checks')
+ health_checks = paginator.paginate(**params).build_full_result()['HealthChecks']
+ return {
+ "HealthChecks": health_checks,
+ "list": health_checks,
+ }
+
+
+def record_sets_details(client, module):
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = module.params.get('max_items')
+
+ if module.params.get('start_record_name'):
+ params['StartRecordName'] = module.params.get('start_record_name')
+
+ if module.params.get('type') and not module.params.get('start_record_name'):
+ module.fail_json(msg="start_record_name must be specified if type is set")
+ elif module.params.get('type'):
+ params['StartRecordType'] = module.params.get('type')
+
+ paginator = client.get_paginator('list_resource_record_sets')
+ record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets']
+ return {
+ "ResourceRecordSets": record_sets,
+ "list": record_sets,
+ }
+
+
+def health_check_details(client, module):
+ health_check_invocations = {
+ 'list': list_health_checks,
+ 'details': get_health_check,
+ 'status': get_health_check,
+ 'failure_reason': get_health_check,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = health_check_invocations[module.params.get('health_check_method')](client, module)
+ return results
+
+
+def hosted_zone_details(client, module):
+ hosted_zone_invocations = {
+ 'details': get_hosted_zone,
+ 'list': list_hosted_zones,
+ 'list_by_name': list_hosted_zones_by_name,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ query=dict(choices=[
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ], required=True),
+ change_id=dict(),
+ hosted_zone_id=dict(),
+ max_items=dict(),
+ next_marker=dict(),
+ delegation_set_id=dict(),
+ start_record_name=dict(),
+ type=dict(choices=[
+ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
+ ]),
+ dns_name=dict(),
+ resource_id=dict(type='list', aliases=['resource_ids'], elements='str'),
+ health_check_id=dict(),
+ hosted_zone_method=dict(choices=[
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags'
+ ], default='list'),
+ health_check_method=dict(choices=[
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ], default='list'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['hosted_zone_method', 'health_check_method'],
+ ],
+ check_boto3=False,
+ )
+ if module._name == 'route53_facts':
+ module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws')
+
+ # Validate Requirements
+ if not (HAS_BOTO or HAS_BOTO3):
+ module.fail_json(msg='boto or boto3 is required for this module.')
+
+ try:
+ route53 = module.client('route53')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invocations = {
+ 'change': change_details,
+ 'checker_ip_range': checker_ip_range_details,
+ 'health_check': health_check_details,
+ 'hosted_zone': hosted_zone_details,
+ 'record_sets': record_sets_details,
+ 'reusable_delegation_set': reusable_delegation_set_details,
+ }
+
+ results = dict(changed=False)
+ try:
+ results = invocations[module.params.get('query')](route53, module)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_zone.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_zone.py
new file mode 100644
index 00000000..6467dd04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/route53_zone.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: route53_zone
+short_description: add or delete Route53 zones
+version_added: 1.0.0
+description:
+ - Creates and deletes Route53 private and public zones.
+requirements: [ boto3 ]
+options:
+ zone:
+ description:
+ - "The DNS zone record (eg: foo.com.)"
+ required: true
+ type: str
+ state:
+ description:
+ - Whether or not the zone should exist.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ vpc_id:
+ description:
+ - The VPC ID the zone should be a part of (if this is going to be a private zone).
+ type: str
+ vpc_region:
+ description:
+ - The VPC Region the zone should be a part of (if this is going to be a private zone).
+ type: str
+ comment:
+ description:
+ - Comment associated with the zone.
+ default: ''
+ type: str
+ hosted_zone_id:
+ description:
+ - The unique zone identifier you want to delete, or "all" if there are many zones with the same domain name.
+ - Required if there are multiple zones identified with the above options.
+ type: str
+ delegation_set_id:
+ description:
+ - The reusable delegation set ID to be associated with the zone.
+ - Note that you can't associate a reusable delegation set with a private hosted zone.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+author: "Christopher Troup (@minichate)"
+'''
+
+EXAMPLES = '''
+- name: create a public zone
+ community.aws.route53_zone:
+ zone: example.com
+ comment: this is an example
+
+- name: delete a public zone
+ community.aws.route53_zone:
+ zone: example.com
+ state: absent
+
+- name: create a private zone
+ community.aws.route53_zone:
+ zone: devel.example.com
+ vpc_id: '{{ myvpc_id }}'
+ vpc_region: us-west-2
+ comment: developer domain
+
+- name: create a public zone associated with a specific reusable delegation set
+ community.aws.route53_zone:
+ zone: example.com
+ comment: reusable delegation set example
+ delegation_set_id: A1BCDEF2GHIJKL
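+
+# A hypothetical example; the zone ID below is a placeholder.
+- name: delete one of several zones with the same name by its zone ID
+ community.aws.route53_zone:
+ zone: example.com
+ hosted_zone_id: Z6JQG9820BEFMW
+ state: absent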
+'''
+
+RETURN = '''
+comment:
+ description: optional hosted zone comment
+ returned: when hosted zone exists
+ type: str
+ sample: "Private zone"
+name:
+ description: hosted zone name
+ returned: when hosted zone exists
+ type: str
+ sample: "private.local."
+private_zone:
+ description: whether hosted zone is private or public
+ returned: when hosted zone exists
+ type: bool
+ sample: true
+vpc_id:
+ description: id of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: str
+ sample: "vpc-1d36c84f"
+vpc_region:
+ description: region of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: str
+ sample: "eu-west-1"
+zone_id:
+ description: hosted zone id
+ returned: when hosted zone exists
+ type: str
+ sample: "Z6JQG9820BEFMW"
+delegation_set_id:
+ description: id of the associated reusable delegation set
+ returned: for public hosted zones, if they have been associated with a reusable delegation set
+ type: str
+ sample: "A1BCDEF2GHIJKL"
+'''
+
+import time
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def find_zones(module, client, zone_in, private_zone):
+ try:
+ paginator = client.get_paginator('list_hosted_zones')
+ results = paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not list current hosted zones")
+ zones = []
+ for r53zone in results['HostedZones']:
+ if r53zone['Name'] != zone_in:
+ continue
+ # only save zone names that match the public/private setting
+ if (r53zone['Config']['PrivateZone'] and private_zone) or \
+ (not r53zone['Config']['PrivateZone'] and not private_zone):
+ zones.append(r53zone)
+
+ return zones
+
+
+def create(module, client, matching_zones):
+ zone_in = module.params.get('zone').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ comment = module.params.get('comment')
+ delegation_set_id = module.params.get('delegation_set_id')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ record = {
+ 'private_zone': private_zone,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'comment': comment,
+ 'name': zone_in,
+ 'delegation_set_id': delegation_set_id,
+ 'zone_id': None,
+ }
+
+ if private_zone:
+ changed, result = create_or_update_private(module, client, matching_zones, record)
+ else:
+ changed, result = create_or_update_public(module, client, matching_zones, record)
+
+ return changed, result
+
+
+def create_or_update_private(module, client, matching_zones, record):
+ for z in matching_zones:
+ try:
+ result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
+ zone_details = result['HostedZone']
+ vpc_details = result['VPCs']
+ current_vpc_id = None
+ current_vpc_region = None
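+ # The zone details may present 'VPCs' as a single dict or as a list of
+ # dicts; handle both shapes when matching the requested VPC.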
+ if isinstance(vpc_details, dict):
+ if vpc_details['VPC']['VPCId'] == record['vpc_id']:
+ current_vpc_id = vpc_details['VPC']['VPCId']
+ current_vpc_region = vpc_details['VPC']['VPCRegion']
+ else:
+ if record['vpc_id'] in [v['VPCId'] for v in vpc_details]:
+ current_vpc_id = record['vpc_id']
+ if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]:
+ current_vpc_region = record['vpc_region']
+
+ if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
+ if not module.check_mode:
+ try:
+ client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
+ return True, record
+ else:
+ record['msg'] = "There is already a private hosted zone in the same region with the same VPC \
+ you chose. Unable to create a new private hosted zone in the same name space."
+ return False, record
+
+ if not module.check_mode:
+ try:
+ result = client.create_hosted_zone(
+ Name=record['name'],
+ HostedZoneConfig={
+ 'Comment': record['comment'] if record['comment'] is not None else "",
+ 'PrivateZone': True,
+ },
+ VPC={
+ 'VPCRegion': record['vpc_region'],
+ 'VPCId': record['vpc_id'],
+ },
+ CallerReference="%s-%s" % (record['name'], time.time()),
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not create hosted zone")
+
+ hosted_zone = result['HostedZone']
+ zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
+ record['zone_id'] = zone_id
+
+ changed = True
+ return changed, record
+
+
+def create_or_update_public(module, client, matching_zones, record):
+ zone_details, zone_delegation_set_details = None, {}
+ for matching_zone in matching_zones:
+ try:
+ zone = client.get_hosted_zone(Id=matching_zone['Id'])
+ zone_details = zone['HostedZone']
+ zone_delegation_set_details = zone.get('DelegationSet', {})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id'])
+ if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
+ if not module.check_mode:
+ try:
+ client.update_hosted_zone_comment(
+ Id=zone_details['Id'],
+ Comment=record['comment']
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
+ changed = True
+ else:
+ changed = False
+ break
+
+ if zone_details is None:
+ if not module.check_mode:
+ try:
+ params = dict(
+ Name=record['name'],
+ HostedZoneConfig={
+ 'Comment': record['comment'] if record['comment'] is not None else "",
+ 'PrivateZone': False,
+ },
+ CallerReference="%s-%s" % (record['name'], time.time()),
+ )
+
+ if record.get('delegation_set_id') is not None:
+ params['DelegationSetId'] = record['delegation_set_id']
+
+ result = client.create_hosted_zone(**params)
+ zone_details = result['HostedZone']
+ zone_delegation_set_details = result.get('DelegationSet', {})
+
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not create hosted zone")
+ changed = True
+
+ if module.check_mode:
+ if zone_details:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ else:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ record['name'] = zone_details['Name']
+ record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '')
+
+ return changed, record
+
+
+def delete_private(module, client, matching_zones, vpc_id, vpc_region):
+ for z in matching_zones:
+ try:
+ result = client.get_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
+ zone_details = result['HostedZone']
+ vpc_details = result['VPCs']
+ if isinstance(vpc_details, dict):
+ if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ return True, "Successfully deleted %s" % zone_details['Name']
+ else:
+ if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ return True, "Successfully deleted %s" % zone_details['Name']
+
+ return False, "The vpc_id and the vpc_region do not match a private hosted zone."
+
+
+def delete_public(module, client, matching_zones):
+ if len(matching_zones) > 1:
+ changed = False
+ msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
+ else:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=matching_zones[0]['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id'])
+ changed = True
+ msg = "Successfully deleted %s" % matching_zones[0]['Id']
+ return changed, msg
+
+
+def delete_hosted_id(module, client, hosted_zone_id, matching_zones):
+ if hosted_zone_id == "all":
+ deleted = []
+ for z in matching_zones:
+ deleted.append(z['Id'])
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ changed = True
+ msg = "Successfully deleted zones: %s" % deleted
+ elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=hosted_zone_id)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
+ changed = True
+ msg = "Successfully deleted zone: %s" % hosted_zone_id
+ else:
+ changed = False
+ msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
+ return changed, msg
+
+
+def delete(module, client, matching_zones):
+ zone_in = module.params.get('zone').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ hosted_zone_id = module.params.get('hosted_zone_id')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ if zone_in in [z['Name'] for z in matching_zones]:
+ if hosted_zone_id:
+ changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones)
+ else:
+ if private_zone:
+ changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region)
+ else:
+ changed, result = delete_public(module, client, matching_zones)
+ else:
+ changed = False
+ result = "No zone to delete."
+
+ return changed, result
+
+
+def main():
+ argument_spec = dict(
+ zone=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ vpc_id=dict(default=None),
+ vpc_region=dict(default=None),
+ comment=dict(default=''),
+ hosted_zone_id=dict(),
+ delegation_set_id=dict(),
+ )
+
+ mutually_exclusive = [
+ ['delegation_set_id', 'vpc_id'],
+ ['delegation_set_id', 'vpc_region'],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+
+ zone_in = module.params.get('zone').lower()
+ state = module.params.get('state').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ client = module.client('route53')
+
+ zones = find_zones(module, client, zone_in, private_zone)
+ if state == 'present':
+ changed, result = create(module, client, matching_zones=zones)
+ elif state == 'absent':
+ changed, result = delete(module, client, matching_zones=zones)
+
+ if isinstance(result, dict):
+ module.exit_json(changed=changed, result=result, **result)
+ else:
+ module.exit_json(changed=changed, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
new file mode 100644
index 00000000..f42c64a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# (c) 2019, XLAB d.o.o <www.xlab.si>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_bucket_notification
+version_added: 1.0.0
+short_description: Creates, updates or deletes S3 bucket notifications for Lambda
+description:
+ - This module allows the management of AWS Lambda function bucket event mappings via the
+ Ansible framework. Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias)
+ to manage function aliases and M(community.aws.lambda_policy) to modify lambda permissions.
+notes:
+ - This module heavily depends on M(community.aws.lambda_policy), as you need to grant the C(lambda:InvokeFunction)
+ permission to your Lambda function; see the policy task in the examples below.
+
+author:
+ - XLAB d.o.o. (@xlab-si)
+ - Aljaz Kosir (@aljazkosir)
+ - Miha Plesko (@miha-plesko)
+options:
+ event_name:
+ description:
+ - Unique name for event notification on bucket.
+ required: true
+ type: str
+ lambda_function_arn:
+ description:
+ - The ARN of the lambda function.
+ aliases: ['function_arn']
+ type: str
+ bucket_name:
+ description:
+ - S3 bucket name.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ lambda_alias:
+ description:
+ - Name of the Lambda function alias.
+ - Mutually exclusive with I(lambda_version).
+ type: str
+ lambda_version:
+ description:
+ - Version of the Lambda function.
+ - Mutually exclusive with I(lambda_alias).
+ type: int
+ events:
+ description:
+ - Events that you want to be triggering notifications. You can select multiple events to send
+ to the same destination, you can set up different events to send to different destinations,
+ and you can set up a prefix or suffix for an event. However, for each bucket,
+ individual events cannot have multiple configurations with overlapping prefixes or
+ suffixes that could match the same object key.
+ - Required when I(state=present).
+ choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
+ 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
+ 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
+ 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
+ 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Optional prefix to limit the notifications to objects with keys that start with matching
+ characters.
+ type: str
+ suffix:
+ description:
+ - Optional suffix to limit the notifications to objects with keys that end with matching
+ characters.
+ type: str
+requirements:
+ - boto3
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+---
+# Example that creates a lambda event notification for a bucket
+- name: Process jpg image
+ community.aws.s3_bucket_notification:
+ state: present
+ event_name: on_file_add_or_remove
+ bucket_name: test-bucket
+ lambda_function_arn: arn:aws:lambda:us-east-2:526810320200:function:test-lambda
+ events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+ prefix: images/
+ suffix: .jpg
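+
+# A hypothetical companion task, assuming the same placeholder bucket and
+# function names as above: the notification only fires once the function's
+# resource policy lets S3 invoke it.
+- name: Allow the bucket to invoke the lambda function
+ community.aws.lambda_policy:
+ state: present
+ function_name: test-lambda
+ statement_id: allow-s3-invoke
+ action: lambda:InvokeFunction
+ principal: s3.amazonaws.com
+ source_arn: arn:aws:s3:::test-bucket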
+'''
+
+RETURN = r'''
+notification_configuration:
+ description: list of currently applied notifications
+ returned: success
+ type: list
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # will be protected by AnsibleAWSModule
+
+
+class AmazonBucket:
+ def __init__(self, client, bucket_name):
+ self.client = client
+ self.bucket_name = bucket_name
+ self._full_config_cache = None
+
+ def full_config(self):
+ if self._full_config_cache is None:
+ self._full_config_cache = [Config.from_api(cfg) for cfg in
+ self.client.get_bucket_notification_configuration(
+ Bucket=self.bucket_name).get(
+ 'LambdaFunctionConfigurations', list())]
+ return self._full_config_cache
+
+ def current_config(self, config_name):
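+ # Return the configuration whose Id matches config_name, or None if absent.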
+ for config in self.full_config():
+ if config.raw['Id'] == config_name:
+ return config
+
+ def apply_config(self, desired):
+ configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
+ configs.append(desired.raw)
+ self._upload_bucket_config(configs)
+ return configs
+
+ def delete_config(self, desired):
+ configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
+ self._upload_bucket_config(configs)
+ return configs
+
+ def _upload_bucket_config(self, config):
+ self.client.put_bucket_notification_configuration(
+ Bucket=self.bucket_name,
+ NotificationConfiguration={
+ 'LambdaFunctionConfigurations': config
+ })
+
+
+class Config:
+ def __init__(self, content):
+ self._content = content
+ self.name = content['Id']
+
+ @property
+ def raw(self):
+ return self._content
+
+ def __eq__(self, other):
+ if other:
+ return self.raw == other.raw
+ return False
+
+ @classmethod
+ def from_params(cls, **params):
+ function_arn = params['lambda_function_arn']
+
+ qualifier = None
+ if params['lambda_version'] > 0:
+ qualifier = str(params['lambda_version'])
+ elif params['lambda_alias']:
+ qualifier = str(params['lambda_alias'])
+ if qualifier:
+ params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+ return cls({
+ 'Id': params['event_name'],
+ 'LambdaFunctionArn': params['lambda_function_arn'],
+ 'Events': sorted(params['events']),
+ 'Filter': {
+ 'Key': {
+ 'FilterRules': [{
+ 'Name': 'Prefix',
+ 'Value': params['prefix']
+ }, {
+ 'Name': 'Suffix',
+ 'Value': params['suffix']
+ }]
+ }
+ }
+ })
+
+ @classmethod
+ def from_api(cls, config):
+ return cls(config)
+
+
+def main():
+ event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
+ 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
+ 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
+ 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
+ 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ event_name=dict(required=True),
+ lambda_function_arn=dict(aliases=['function_arn']),
+ bucket_name=dict(required=True),
+ events=dict(type='list', default=[], choices=event_types, elements='str'),
+ prefix=dict(default=''),
+ suffix=dict(default=''),
+ lambda_alias=dict(),
+ lambda_version=dict(type='int', default=0),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['lambda_alias', 'lambda_version']],
+ required_if=[['state', 'present', ['events']]]
+ )
+
+ bucket = AmazonBucket(module.client('s3'), module.params['bucket_name'])
+ current = bucket.current_config(module.params['event_name'])
+ desired = Config.from_params(**module.params)
+ notification_configuration = [cfg.raw for cfg in bucket.full_config()]
+
+ state = module.params['state']
+ try:
+ if (state == 'present' and current == desired) or (state == 'absent' and not current):
+ changed = False
+ elif module.check_mode:
+ changed = True
+ elif state == 'present':
+ changed = True
+ notification_configuration = bucket.apply_config(desired)
+ elif state == 'absent':
+ changed = True
+ notification_configuration = bucket.delete_config(desired)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json(msg='{0}'.format(e))
+
+ module.exit_json(**dict(changed=changed,
+ notification_configuration=[camel_dict_to_snake_dict(cfg) for cfg in
+ notification_configuration]))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
new file mode 100644
index 00000000..5edceea5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_lifecycle
+version_added: 1.0.0
+short_description: Manage s3 bucket lifecycle rules in AWS
+description:
+ - Manage s3 bucket lifecycle rules in AWS
+author: "Rob White (@wimnat)"
+notes:
+ - If specifying expiration time as days, then transition time must also be specified in days.
+ - If specifying expiration time as a date, then transition time must also be specified as a date.
+requirements:
+ - python-dateutil
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ type: str
+ expiration_date:
+ description:
+ - >
+ Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
+ be midnight and a GMT timezone must be specified.
+ type: str
+ expiration_days:
+ description:
+ - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
+ type: int
+ prefix:
+ description:
+ - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
+ type: str
+ purge_transitions:
+ description:
+ - >
+ "Whether to replace all the current transition(s) with the new transition(s). When false, the provided transition(s)
+ will be added, replacing transitions with the same storage_class. When true, existing transitions will be removed and
+ replaced with the new transition(s)
+ default: true
+ type: bool
+ noncurrent_version_expiration_days:
+ description:
+ - 'Delete noncurrent versions this many days after they become noncurrent'
+ required: false
+ type: int
+ noncurrent_version_storage_class:
+ description:
+ - 'Transition noncurrent versions to this storage class'
+ default: glacier
+ choices: ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ required: false
+ type: str
+ noncurrent_version_transition_days:
+ description:
+ - 'Transition noncurrent versions this many days after they become noncurrent'
+ required: false
+ type: int
+ noncurrent_version_transitions:
+ description:
+ - >
+ A list of transition behaviors to be applied to noncurrent versions for the rule. Each storage class may be used only once. Each transition
+ behavior contains these elements:
+ I(transition_days)
+ I(storage_class)
+ type: list
+ elements: dict
+ rule_id:
+ description:
+ - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
+ type: str
+ state:
+ description:
+ - "Create or remove the lifecycle rule"
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ status:
+ description:
+ - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
+ default: enabled
+ choices: [ 'enabled', 'disabled' ]
+ type: str
+ storage_class:
+ description:
+ - "The storage class to transition to."
+ - "The 'standard_ia' class is only being available from Ansible version 2.2."
+ default: glacier
+ choices: [ 'glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ type: str
+ transition_date:
+ description:
+ - >
+ Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
+ The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
+ this parameter is required."
+ type: str
+ transition_days:
+ description:
+ - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+ type: int
+ transitions:
+ description:
+ - A list of transition behaviors to be applied to the rule. Each storage class may be used only once. Each transition
+ behavior may contain these elements:
+ I(transition_days)
+ I(transition_date)
+ I(storage_class)
+ type: list
+ elements: dict
+ requester_pays:
+ description:
+ - The I(requester_pays) option does nothing and will be removed after 2022-06-01
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+ community.aws.s3_lifecycle:
+ name: mybucket
+ expiration_days: 30
+ prefix: logs/
+ status: enabled
+ state: present
+
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+ community.aws.s3_lifecycle:
+ name: mybucket
+ transition_days: 7
+ expiration_days: 90
+ prefix: logs/
+ status: enabled
+ state: present
+
+# Note that midnight GMT must be specified.
+# Be sure to quote your date strings
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030.
+ community.aws.s3_lifecycle:
+ name: mybucket
+ transition_date: "2020-12-30T00:00:00.000Z"
+ expiration_date: "2030-12-30T00:00:00.000Z"
+ prefix: logs/
+ status: enabled
+ state: present
+
+- name: Disable the rule created above
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ status: disabled
+ state: present
+
+- name: Delete the lifecycle rule created above
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ state: absent
+
+- name: Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: backups/
+ storage_class: standard_ia
+ transition_days: 31
+ state: present
+ status: enabled
+
+- name: Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ state: present
+ status: enabled
+ transitions:
+ - transition_days: 30
+ storage_class: standard_ia
+ - transition_days: 90
+ storage_class: glacier
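+
+# A hypothetical example for versioned buckets; the values are placeholders.
+- name: Expire noncurrent object versions 30 days after they become noncurrent
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ noncurrent_version_expiration_days: 30
+ state: present
+ status: enabled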
+'''
+
+from copy import deepcopy
+import datetime
+
+try:
+ import dateutil.parser
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAwsModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def create_lifecycle_rule(client, module):
+
+ name = module.params.get("name")
+ expiration_date = module.params.get("expiration_date")
+ expiration_days = module.params.get("expiration_days")
+ noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days")
+ noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days")
+ noncurrent_version_transitions = module.params.get("noncurrent_version_transitions")
+ noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class")
+ prefix = module.params.get("prefix") or ""
+ rule_id = module.params.get("rule_id")
+ status = module.params.get("status")
+ storage_class = module.params.get("storage_class")
+ transition_date = module.params.get("transition_date")
+ transition_days = module.params.get("transition_days")
+ transitions = module.params.get("transitions")
+ purge_transitions = module.params.get("purge_transitions")
+ changed = False
+
+ # Get the bucket's current lifecycle rules
+ try:
+ current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name)
+ current_lifecycle_rules = current_lifecycle['Rules']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
+ current_lifecycle_rules = []
+ else:
+ module.fail_json_aws(e)
+ except BotoCoreError as e:
+ module.fail_json_aws(e)
+
+ rule = dict(Filter=dict(Prefix=prefix), Status=status.title())
+ if rule_id is not None:
+ rule['ID'] = rule_id
+ # Create expiration
+ if expiration_days is not None:
+ rule['Expiration'] = dict(Days=expiration_days)
+ elif expiration_date is not None:
+ rule['Expiration'] = dict(Date=expiration_date)
+
+ if noncurrent_version_expiration_days is not None:
+ rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days)
+
+ if transition_days is not None:
+ rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ]
+
+ elif transition_date is not None:
+ rule['Transitions'] = [dict(Date=transition_date, StorageClass=storage_class.upper()), ]
+
+ if transitions is not None:
+ if not rule.get('Transitions'):
+ rule['Transitions'] = []
+ for transition in transitions:
+ t_out = dict()
+ if transition.get('transition_date'):
+ t_out['Date'] = transition['transition_date']
+ elif transition.get('transition_days'):
+ t_out['Days'] = transition['transition_days']
+ if transition.get('storage_class'):
+ t_out['StorageClass'] = transition['storage_class'].upper()
+ rule['Transitions'].append(t_out)
+
+ if noncurrent_version_transition_days is not None:
+ rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days,
+ StorageClass=noncurrent_version_storage_class.upper()), ]
+
+ if noncurrent_version_transitions is not None:
+ if not rule.get('NoncurrentVersionTransitions'):
+ rule['NoncurrentVersionTransitions'] = []
+ for noncurrent_version_transition in noncurrent_version_transitions:
+ t_out = dict()
+ t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days']
+ if noncurrent_version_transition.get('storage_class'):
+ t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper()
+ rule['NoncurrentVersionTransitions'].append(t_out)
+
+ lifecycle_configuration = dict(Rules=[])
+ appended = False
+ # If current_lifecycle_rules is not empty then we have rules to compare, otherwise just add the rule
+ if current_lifecycle_rules:
+ # If rule ID exists, use that for comparison otherwise compare based on prefix
+ for existing_rule in current_lifecycle_rules:
+ if rule.get('ID') == existing_rule.get('ID') and rule['Filter']['Prefix'] != existing_rule.get('Filter', {}).get('Prefix', ''):
+ existing_rule.pop('ID')
+ elif rule_id is None and rule['Filter']['Prefix'] == existing_rule.get('Filter', {}).get('Prefix', ''):
+ existing_rule.pop('ID')
+ if rule.get('ID') == existing_rule.get('ID'):
+ changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration)
+ changed = changed_ or changed
+ appended = appended_ or appended
+ else:
+ lifecycle_configuration['Rules'].append(existing_rule)
+
+ # If nothing appended then append now as the rule must not exist
+ if not appended:
+ lifecycle_configuration['Rules'].append(rule)
+ changed = True
+ else:
+ lifecycle_configuration['Rules'].append(rule)
+ changed = True
+
+ # Write lifecycle to bucket
+ try:
+ client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj):
+ changed = False
+ if existing_rule['Status'] != new_rule['Status']:
+ if not new_rule.get('Transitions') and existing_rule.get('Transitions'):
+ new_rule['Transitions'] = existing_rule['Transitions']
+ if not new_rule.get('Expiration') and existing_rule.get('Expiration'):
+ new_rule['Expiration'] = existing_rule['Expiration']
+ if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'):
+ new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration']
+ lifecycle_obj['Rules'].append(new_rule)
+ changed = True
+ appended = True
+ else:
+ if not purge_transitions:
+ merge_transitions(new_rule, existing_rule)
+ if compare_rule(new_rule, existing_rule, purge_transitions):
+ lifecycle_obj['Rules'].append(new_rule)
+ appended = True
+ else:
+ lifecycle_obj['Rules'].append(new_rule)
+ changed = True
+ appended = True
+ return changed, appended
+
+
+def compare_rule(rule_a, rule_b, purge_transitions):
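+ # Return True when the rules match. With purge_transitions false, transitions
+ # in rule_a only need to be contained in rule_b, ignoring ordering.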
+
+ # Copy objects
+ rule1 = deepcopy(rule_a)
+ rule2 = deepcopy(rule_b)
+
+ if purge_transitions:
+ return rule1 == rule2
+ else:
+ transitions1 = rule1.pop('Transitions', [])
+ transitions2 = rule2.pop('Transitions', [])
+ noncurrent_transitions1 = rule1.pop('NoncurrentVersionTransitions', [])
+ noncurrent_transitions2 = rule2.pop('NoncurrentVersionTransitions', [])
+ if rule1 != rule2:
+ return False
+ for transition in transitions1:
+ if transition not in transitions2:
+ return False
+ for transition in noncurrent_transitions1:
+ if transition not in noncurrent_transitions2:
+ return False
+ return True
+
+
+def merge_transitions(updated_rule, updating_rule):
+ # S3 allows at most one transition per storage class, so build dicts keyed on
+ # storage class and copy into updated_rule any transitions that exist only in
+ # updating_rule.
+ updated_transitions = {}
+ updating_transitions = {}
+ for transition in updated_rule.get('Transitions', []):
+ updated_transitions[transition['StorageClass']] = transition
+ for transition in updating_rule.get('Transitions', []):
+ updating_transitions[transition['StorageClass']] = transition
+ for storage_class, transition in updating_transitions.items():
+ if updated_transitions.get(storage_class) is None:
+ updated_rule['Transitions'].append(transition)
+
+
+def destroy_lifecycle_rule(client, module):
+
+ name = module.params.get("name")
+ prefix = module.params.get("prefix")
+ rule_id = module.params.get("rule_id")
+ changed = False
+
+ if prefix is None:
+ prefix = ""
+
+ # Get the bucket's current lifecycle rules
+ try:
+ current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
+ current_lifecycle_rules = []
+ else:
+ module.fail_json_aws(e)
+ except BotoCoreError as e:
+ module.fail_json_aws(e)
+
+ # Create lifecycle
+ lifecycle_obj = dict(Rules=[])
+
+ # Check if rule exists
+ # If an ID exists, use that otherwise compare based on prefix
+ if rule_id is not None:
+ for existing_rule in current_lifecycle_rules:
+ if rule_id == existing_rule['ID']:
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_obj['Rules'].append(existing_rule)
+ else:
+ for existing_rule in current_lifecycle_rules:
+ if prefix == existing_rule.get('Filter', {}).get('Prefix', ''):
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_obj['Rules'].append(existing_rule)
+
+ # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
+ try:
+ if lifecycle_obj['Rules']:
+ client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj)
+ elif current_lifecycle_rules:
+ changed = True
+ client.delete_bucket_lifecycle(Bucket=name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e)
+ module.exit_json(changed=changed)
+
+
+def main():
+ s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ expiration_days=dict(type='int'),
+ expiration_date=dict(),
+ noncurrent_version_expiration_days=dict(type='int'),
+ noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
+ noncurrent_version_transition_days=dict(type='int'),
+ noncurrent_version_transitions=dict(type='list', elements='dict'),
+ prefix=dict(),
+ requester_pays=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ rule_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
+ transition_days=dict(type='int'),
+ transition_date=dict(),
+ transitions=dict(type='list', elements='dict'),
+ purge_transitions=dict(default='yes', type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['expiration_days', 'expiration_date'],
+ ['expiration_days', 'transition_date'],
+ ['transition_days', 'transition_date'],
+ ['transition_days', 'expiration_date'],
+ ['transition_days', 'transitions'],
+ ['transition_date', 'transitions'],
+ ['noncurrent_version_transition_days', 'noncurrent_version_transitions'],
+ ],)
+
+ if not HAS_DATEUTIL:
+ module.fail_json(msg='dateutil required for this module')
+
+ client = module.client('s3')
+
+ expiration_date = module.params.get("expiration_date")
+ transition_date = module.params.get("transition_date")
+ state = module.params.get("state")
+
+ if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
+
+ required_when_present = ('expiration_date', 'expiration_days', 'transition_date',
+ 'transition_days', 'transitions', 'noncurrent_version_expiration_days',
+ 'noncurrent_version_transition_days',
+ 'noncurrent_version_transitions')
+ for param in required_when_present:
+ if module.params.get(param):
+ break
+ else:
+ msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
+ module.fail_json(msg=msg)
+ # If expiration_date set, check string is valid
+ if expiration_date is not None:
+ try:
+ datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
+ except ValueError as e:
+ module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+ if transition_date is not None:
+ try:
+ datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
+ except ValueError as e:
+ module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+ if state == 'present':
+ create_lifecycle_rule(client, module)
+ elif state == 'absent':
+ destroy_lifecycle_rule(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_logging.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_logging.py
new file mode 100644
index 00000000..24f4004e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_logging.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_logging
+version_added: 1.0.0
+short_description: Manage logging facility of an s3 bucket in AWS
+description:
+ - Manage logging facility of an s3 bucket in AWS
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket."
+ required: true
+ type: str
+ state:
+ description:
+ - "Enable or disable logging."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ target_bucket:
+ description:
+ - "The bucket to log to. Required when state=present."
+ type: str
+ target_prefix:
+ description:
+ - "The prefix that should be prepended to the generated log files written to the target_bucket."
+ default: ""
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
+ community.aws.s3_logging:
+ name: mywebsite.com
+ target_bucket: mylogs
+ target_prefix: logs/mywebsite.com
+ state: present
+
+- name: Remove logging on an s3 bucket
+ community.aws.s3_logging:
+ name: mywebsite.com
+ state: absent
+
+'''
+
+try:
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.exception import S3ResponseError
+except ImportError:
+ pass # Handled by HAS_BOTO
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+
+
+def compare_bucket_logging(bucket, target_bucket, target_prefix):
+
+ bucket_log_obj = bucket.get_logging_status()
+ return bucket_log_obj.target == target_bucket and bucket_log_obj.prefix == target_prefix
+
+
+def enable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ target_bucket = module.params.get("target_bucket")
+ target_prefix = module.params.get("target_prefix")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ except S3ResponseError as e:
+ module.fail_json(msg=to_native(e))
+
+ try:
+ if not compare_bucket_logging(bucket, target_bucket, target_prefix):
+ # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
+ try:
+ target_bucket_obj = connection.get_bucket(target_bucket)
+ except S3ResponseError as e:
+ if e.status == 301:
+ module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
+ else:
+ module.fail_json(msg=to_native(e))
+ target_bucket_obj.set_as_logging_target()
+
+ bucket.enable_logging(target_bucket, target_prefix)
+ changed = True
+
+ except S3ResponseError as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed)
+
+
+def disable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ if not compare_bucket_logging(bucket, None, None):
+ bucket.disable_logging()
+ changed = True
+ except S3ResponseError as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ target_bucket=dict(required=False, default=None),
+ target_prefix=dict(required=False, default=""),
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region in ('us-east-1', '', None):
+ # S3ism for the US Standard region
+ location = Location.DEFAULT
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+ try:
+ connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+ # Fall back to connect_s3, because connect_to_region can fail with boto on non-'classic' AWS accounts in some cases
+ if connection is None:
+ connection = boto.connect_s3(**aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_bucket_logging(connection, module)
+ elif state == 'absent':
+ disable_bucket_logging(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
new file mode 100644
index 00000000..729503cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_metrics_configuration
+version_added: 1.3.0
+short_description: Manage s3 bucket metrics configuration in AWS
+description:
+ - Manage s3 bucket metrics configuration in AWS, which allows you to get CloudWatch request metrics for the objects in a bucket.
+author: Dmytro Vorotyntsev (@vorotech)
+notes:
+ - This module manages a single metrics configuration; an s3 bucket may have up to 1,000 metrics configurations.
+ - To request metrics for the entire bucket, create a metrics configuration without a filter.
+ - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on.
+options:
+ bucket_name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ type: str
+ id:
+ description:
+ - "The ID used to identify the metrics configuration"
+ required: true
+ type: str
+ filter_prefix:
+ description:
+ - "A prefix used when evaluating a metrics filter"
+ required: false
+ type: str
+ filter_tags:
+ description:
+ - "A dictionary of one or more tags used when evaluating a metrics filter"
+ required: false
+ aliases: ['filter_tag']
+ type: dict
+ state:
+ description:
+ - "Create or delete metrics configuration"
+ default: present
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a metrics configuration that enables metrics for an entire bucket
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: EntireBucket
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects starting with a prefix
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: Assets
+ filter_prefix: assets
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects with specific tag
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: Assets
+ filter_tag:
+ kind: asset
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects that start with a particular prefix and have specific tags applied
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: ImportantBlueDocuments
+ filter_prefix: documents
+ filter_tags:
+ priority: high
+ class: blue
+ state: present
+
+- name: Delete metrics configuration
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: EntireBucket
+ state: absent
+
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list
+
+
+def _create_metrics_configuration(mc_id, filter_prefix, filter_tags):
+ payload = {
+ 'Id': mc_id
+ }
+ # Just a filter_prefix or just a single tag filter is a special case
+ if filter_prefix and not filter_tags:
+ payload['Filter'] = {
+ 'Prefix': filter_prefix
+ }
+ elif not filter_prefix and len(filter_tags) == 1:
+ payload['Filter'] = {
+ 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0]
+ }
+ # Otherwise we need to use 'And'
+ elif filter_tags:
+ payload['Filter'] = {
+ 'And': {
+ 'Tags': ansible_dict_to_boto3_tag_list(filter_tags)
+ }
+ }
+ if filter_prefix:
+ payload['Filter']['And']['Prefix'] = filter_prefix
+
+ return payload
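+
+
+# A minimal, illustrative sketch of the payloads the builder above produces;
+# the ids, prefix and tag values are made up. A prefix-only filter stays flat,
+# a single tag (with no prefix) uses 'Tag', and a prefix combined with tags is
+# wrapped in 'And'.
+def _example_metrics_filters():
+    assert _create_metrics_configuration('Assets', 'assets', {}) == {
+        'Id': 'Assets',
+        'Filter': {'Prefix': 'assets'},
+    }
+    combined = _create_metrics_configuration('Docs', 'documents', {'priority': 'high'})
+    assert combined['Filter']['And']['Prefix'] == 'documents'
+    assert combined['Filter']['And']['Tags'] == [{'Key': 'priority', 'Value': 'high'}]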
+
+
+def create_or_update_metrics_configuration(client, module):
+ bucket_name = module.params.get('bucket_name')
+ mc_id = module.params.get('id')
+ filter_prefix = module.params.get('filter_prefix')
+ filter_tags = module.params.get('filter_tags')
+
+ try:
+ response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ metrics_configuration = response['MetricsConfiguration']
+ except is_boto3_error_code('NoSuchConfiguration'):
+ metrics_configuration = None
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
+
+ new_configuration = _create_metrics_configuration(mc_id, filter_prefix, filter_tags)
+
+ if metrics_configuration:
+ if metrics_configuration == new_configuration:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ client.put_bucket_metrics_configuration(
+ aws_retry=True,
+ Bucket=bucket_name,
+ Id=mc_id,
+ MetricsConfiguration=new_configuration
+ )
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id)
+
+ module.exit_json(changed=True)
+
+
+def delete_metrics_configuration(client, module):
+ bucket_name = module.params.get('bucket_name')
+ mc_id = module.params.get('id')
+
+ try:
+ client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ except is_boto3_error_code('NoSuchConfiguration'):
+ module.exit_json(changed=False)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ except is_boto3_error_code('NoSuchConfiguration'):
+ module.exit_json(changed=False)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = dict(
+ bucket_name=dict(type='str', required=True),
+ id=dict(type='str', required=True),
+ filter_prefix=dict(type='str', required=False),
+ filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+
+ try:
+ client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ create_or_update_metrics_configuration(client, module)
+ elif state == 'absent':
+ delete_metrics_configuration(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_sync.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_sync.py
new file mode 100644
index 00000000..78326587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_sync.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_sync
+version_added: 1.0.0
+short_description: Efficiently upload multiple files to S3
+description:
+  - The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, this module handles globbing,
+    inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
+options:
+ mode:
+ description:
+      - Sync direction.
+ default: 'push'
+ choices: [ 'push' ]
+ type: str
+ file_change_strategy:
+ description:
+      - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched; they are either fully skipped or fully uploaded.
+      - date_size will upload if file sizes don't match or if the local file's modified date is newer than s3's version.
+      - checksum will compare etag values based on s3's implementation of chunked md5s.
+      - force will always upload all files.
+ required: false
+ default: 'date_size'
+ choices: [ 'force', 'checksum', 'date_size' ]
+ type: str
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ key_prefix:
+ description:
+      - In addition to the file path, prepend the s3 path with this prefix. The module will add a slash at the end of the prefix if necessary.
+ required: false
+ type: str
+ file_root:
+ description:
+ - File/directory path for synchronization. This is a local path.
+ - This root path is scrubbed from the key name, so subdirectories will remain as keys.
+ required: true
+ type: path
+ permission:
+ description:
+ - Canned ACL to apply to synced files.
+ - Changing this ACL only changes newly synced files, it does not trigger a full reupload.
+ required: false
+ choices:
+ - 'private'
+ - 'public-read'
+ - 'public-read-write'
+ - 'authenticated-read'
+ - 'aws-exec-read'
+ - 'bucket-owner-read'
+ - 'bucket-owner-full-control'
+ type: str
+ mime_map:
+ description:
+ - >
+ Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
+ For example C({".txt": "application/text", ".yml": "application/text"})
+ required: false
+ type: dict
+ include:
+ description:
+ - Shell pattern-style file matching.
+ - Used before exclude to determine eligible files (for instance, only "*.gif")
+ - For multiple patterns, comma-separate them.
+ required: false
+ default: "*"
+ type: str
+ exclude:
+ description:
+ - Shell pattern-style file matching.
+ - Used after include to remove files (for instance, skip "*.txt")
+ - For multiple patterns, comma-separate them.
+ required: false
+ default: ".*"
+ type: str
+ cache_control:
+ description:
+ - Cache-Control header set on uploaded objects.
+ - Directives are separated by commas.
+ required: false
+ type: str
+ delete:
+ description:
+ - Remove remote files that exist in bucket but are not present in the file root.
+ required: false
+ default: no
+ type: bool
+ retries:
+ description:
+ - The I(retries) option does nothing and will be removed after 2022-06-01
+ type: str
+
+requirements:
+ - boto3 >= 1.4.4
+ - botocore
+ - python-dateutil
+
+author: Ted Timmons (@tedder)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: basic upload
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files/
+
+- name: all the options
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files
+ mime_map:
+ .yml: application/text
+ .json: application/text
+ key_prefix: config_files/web
+ file_change_strategy: force
+ permission: public-read
+ cache_control: "public, max-age=31536000"
+ include: "*"
+ exclude: "*.txt,.*"
+'''
+
+RETURN = '''
+filelist_initial:
+ description: file listing (dicts) from initial globbing
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "modified_epoch": 1477416706
+ }]
+filelist_local_etag:
+ description: file listing (dicts) including calculated local etag
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706,
+ "s3_path": "s3sync/policy.json"
+ }]
+filelist_s3:
+ description: file listing (dicts) including information about previously-uploaded versions
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706,
+ "s3_path": "s3sync/policy.json"
+ }]
+filelist_typed:
+ description: file listing (dicts) with calculated or overridden mime types
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706
+ }]
+filelist_actionable:
+ description: file listing (dicts) of files that will be uploaded after the strategy decision
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477931256,
+ "s3_path": "s3sync/policy.json",
+ "whysize": "151 / 151",
+ "whytime": "1477931256 / 1477929260"
+ }]
+uploaded:
+ description: file listing (dicts) of files that were actually uploaded
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "s3_path": "s3sync/policy.json",
+ "whysize": "151 / 151",
+ "whytime": "1477931637 / 1477931489"
+ }]
+
+'''
+
+import datetime
+import fnmatch
+import hashlib
+import mimetypes
+import os
+import stat as osstat # os.stat constants
+import traceback
+
+try:
+ from dateutil import tz
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ import botocore
+ from boto3.s3.transfer import TransferConfig
+ DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize
+except ImportError:
+ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
+
+
+# the following function, calculate_multipart_etag, is from tlastowka
+# on github and is used under its (compatible) GPL license. So this
+# license applies to the following function.
+# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
+#
+# calculate_multipart_etag Copyright (C) 2015
+# Tony Lastowka <tlastowka at gmail dot com>
+# https://github.com/tlastowka
+#
+#
+# calculate_multipart_etag is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# calculate_multipart_etag is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
+def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
+ """
+ calculates a multipart upload etag for amazon s3
+
+ Arguments:
+
+ source_path -- The file to calculate the etag for
+ chunk_size -- The chunk size to calculate for.
+ """
+
+ md5s = []
+
+ with open(source_path, 'rb') as fp:
+ while True:
+
+ data = fp.read(chunk_size)
+
+ if not data:
+ break
+ md5s.append(hashlib.md5(data))
+
+ if len(md5s) == 1:
+ new_etag = '"{0}"'.format(md5s[0].hexdigest())
+ else: # > 1
+ digests = b"".join(m.digest() for m in md5s)
+
+ new_md5 = hashlib.md5(digests)
+ new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s))
+
+ return new_etag
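+
+
+# A short usage sketch of the helper above, on a throwaway temp file: content
+# that spills into a second chunk yields S3's multipart '"<md5>-<parts>"'
+# shape, while a single chunk would yield a plain quoted md5.
+def _example_multipart_etag():
+    import tempfile
+    with tempfile.NamedTemporaryFile() as tmp:
+        tmp.write(b'x' * (DEFAULT_CHUNK_SIZE + 1))  # one full chunk plus one byte
+        tmp.flush()
+        return calculate_multipart_etag(tmp.name)  # e.g. '"...-2"'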
+
+
+def gather_files(fileroot, include=None, exclude=None):
+ ret = []
+ for (dirpath, dirnames, filenames) in os.walk(fileroot):
+ for fn in filenames:
+ fullpath = os.path.join(dirpath, fn)
+ # include/exclude
+ if include:
+ found = False
+ for x in include.split(','):
+ if fnmatch.fnmatch(fn, x):
+ found = True
+ if not found:
+ # not on the include list, so we don't want it.
+ continue
+
+ if exclude:
+ found = False
+ for x in exclude.split(','):
+ if fnmatch.fnmatch(fn, x):
+ found = True
+ if found:
+ # skip it, even if previously included.
+ continue
+
+ chopped_path = os.path.relpath(fullpath, start=fileroot)
+ fstat = os.stat(fullpath)
+ f_size = fstat[osstat.ST_SIZE]
+ f_modified_epoch = fstat[osstat.ST_MTIME]
+ ret.append({
+ 'fullpath': fullpath,
+ 'chopped_path': chopped_path,
+ 'modified_epoch': f_modified_epoch,
+ 'bytes': f_size,
+ })
+ # dirpath = path *to* the directory
+ # dirnames = subdirs *in* our directory
+    # filenames = files *in* our directory
+ return ret
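+
+
+# A brief usage sketch of the walker above, with a hypothetical directory:
+# include is applied first to pick candidates, exclude then drops matches,
+# and both take comma-separated shell patterns.
+def _example_gather_files():
+    # keep every .yml and .json file under /tmp/site except *.secret.yml
+    return gather_files('/tmp/site', include='*.yml,*.json', exclude='*.secret.yml')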
+
+
+def calculate_s3_path(filelist, key_prefix=''):
+ ret = []
+ for fileentry in filelist:
+ # don't modify the input dict
+ retentry = fileentry.copy()
+ retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path'])
+ ret.append(retentry)
+ return ret
+
+
+def calculate_local_etag(filelist, key_prefix=''):
+ '''Really, "calculate md5", but since AWS uses their own format, we'll just call
+ it a "local etag". TODO optimization: only calculate if remote key exists.'''
+ ret = []
+ for fileentry in filelist:
+ # don't modify the input dict
+ retentry = fileentry.copy()
+ retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath'])
+ ret.append(retentry)
+ return ret
+
+
+def determine_mimetypes(filelist, override_map):
+ ret = []
+ for fileentry in filelist:
+ retentry = fileentry.copy()
+ localfile = fileentry['fullpath']
+
+ # reminder: file extension is '.txt', not 'txt'.
+ file_extension = os.path.splitext(localfile)[1]
+ if override_map and override_map.get(file_extension):
+ # override? use it.
+ retentry['mime_type'] = override_map[file_extension]
+ else:
+ # else sniff it
+ retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
+
+ # might be None or '' from one of the above. Not a great type but better than nothing.
+ if not retentry['mime_type']:
+ retentry['mime_type'] = 'application/octet-stream'
+
+ ret.append(retentry)
+
+ return ret
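+
+
+# A compact restatement of the precedence above, with hypothetical filenames:
+# an entry in the override map wins, otherwise the sniffed type is used, and
+# an unresolvable type falls back to 'application/octet-stream'.
+def _example_mimetype_choice(filename, override_map):
+    file_extension = os.path.splitext(filename)[1]
+    if override_map and override_map.get(file_extension):
+        return override_map[file_extension]
+    mime_type = mimetypes.guess_type(filename, strict=False)[0]
+    return mime_type or 'application/octet-stream'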
+
+
+def head_s3(s3, bucket, s3keys):
+ retkeys = []
+ for entry in s3keys:
+ retentry = entry.copy()
+ # don't modify the input dict
+ try:
+ retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
+ except botocore.exceptions.ClientError as err:
+ if (hasattr(err, 'response') and
+ 'ResponseMetadata' in err.response and
+ 'HTTPStatusCode' in err.response['ResponseMetadata'] and
+ str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
+ pass
+ else:
+ raise Exception(err)
+ # error_msg = boto_exception(err)
+ # return {'error': error_msg}
+ retkeys.append(retentry)
+ return retkeys
+
+
+def filter_list(s3, bucket, s3filelist, strategy):
+ keeplist = list(s3filelist)
+
+ for e in keeplist:
+ e['_strategy'] = strategy
+
+ # init/fetch info from S3 if we're going to use it for comparisons
+    if strategy != 'force':
+ keeplist = head_s3(s3, bucket, s3filelist)
+
+ # now actually run the strategies
+ if strategy == 'checksum':
+ for entry in keeplist:
+ if entry.get('s3_head'):
+ # since we have a remote s3 object, compare the values.
+ if entry['s3_head']['ETag'] == entry['local_etag']:
+ # files match, so remove the entry
+ entry['skip_flag'] = True
+ else:
+ # file etags don't match, keep the entry.
+ pass
+ else: # we don't have an etag, so we'll keep it.
+ pass
+ elif strategy == 'date_size':
+ for entry in keeplist:
+ if entry.get('s3_head'):
+ # fstat = entry['stat']
+ local_modified_epoch = entry['modified_epoch']
+ local_size = entry['bytes']
+
+                # py2's datetime doesn't have a timestamp() method, so we have to revert to something more awkward.
+ # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
+ remote_modified_datetime = entry['s3_head']['LastModified']
+ delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
+ remote_modified_epoch = delta.seconds + (delta.days * 86400)
+
+ remote_size = entry['s3_head']['ContentLength']
+
+ entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch)
+ entry['whysize'] = '{0} / {1}'.format(local_size, remote_size)
+
+ if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
+ entry['skip_flag'] = True
+ else:
+ entry['why'] = "no s3_head"
+    # else: probably 'force'. Basically we don't skip with any other strategies.
+ else:
+ pass
+
+ # prune 'please skip' entries, if any.
+ return [x for x in keeplist if not x.get('skip_flag')]
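+
+
+# A worked instance of the date_size rule above, with made-up numbers: equal
+# sizes and a local mtime no newer than the remote one mean the file is
+# skipped; any size difference or a newer local file keeps it in the list.
+def _example_date_size_decision():
+    local_modified_epoch, local_size = 1477929260, 151
+    remote_modified_epoch, remote_size = 1477931256, 151
+    skip = local_modified_epoch <= remote_modified_epoch and local_size == remote_size
+    return skip  # True, so this entry would be pruned from the upload list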
+
+
+def upload_files(s3, bucket, filelist, params):
+ ret = []
+ for entry in filelist:
+ args = {
+ 'ContentType': entry['mime_type']
+ }
+ if params.get('permission'):
+ args['ACL'] = params['permission']
+ if params.get('cache_control'):
+ args['CacheControl'] = params['cache_control']
+ # if this fails exception is caught in main()
+ s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
+ ret.append(entry)
+ return ret
+
+
+def remove_files(s3, sourcelist, params):
+ bucket = params.get('bucket')
+ key_prefix = params.get('key_prefix')
+ paginator = s3.get_paginator('list_objects_v2')
+ current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', []))
+ keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist)
+ delete_keys = list(current_keys - keep_keys)
+
+ # can delete 1000 objects at a time
+ groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
+ for keys in groups_of_keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]})
+
+ return delete_keys
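+
+
+# A worked instance of the 1000-key batching above, with made-up keys:
+# delete_objects accepts at most 1000 keys per call, so 2500 stale keys
+# become three calls of 1000, 1000 and 500.
+def _example_delete_batches():
+    delete_keys = ['stale/key-%d' % i for i in range(2500)]
+    groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
+    return [len(keys) for keys in groups_of_keys]  # [1000, 1000, 500]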
+
+
+def main():
+ argument_spec = dict(
+ mode=dict(choices=['push'], default='push'),
+ file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
+ bucket=dict(required=True),
+ key_prefix=dict(required=False, default=''),
+ file_root=dict(required=True, type='path'),
+ permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
+ 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
+ retries=dict(required=False, removed_at_date='2022-06-01', removed_from_collection='community.aws'),
+ mime_map=dict(required=False, type='dict'),
+ exclude=dict(required=False, default=".*"),
+ include=dict(required=False, default="*"),
+ cache_control=dict(required=False, default=''),
+ delete=dict(required=False, type='bool', default=False),
+ # future options: encoding, metadata, storage_class, retries
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_DATEUTIL:
+ module.fail_json(msg='dateutil required for this module')
+
+ result = {}
+ mode = module.params['mode']
+
+ try:
+ s3 = module.client('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if mode == 'push':
+ try:
+ result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
+ result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
+ result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
+ result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
+ result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
+ result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
+
+ if module.params['delete']:
+ result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params)
+
+ # mark changed if we actually upload something.
+ if result.get('uploads') or result.get('removed'):
+ result['changed'] = True
+ # result.update(filelist=actionable_filelist)
+ except botocore.exceptions.ClientError as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_website.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_website.py
new file mode 100644
index 00000000..6f7aa898
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/s3_website.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_website
+version_added: 1.0.0
+short_description: Configure an s3 bucket as a website
+description:
+ - Configure an s3 bucket as a website
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ type: str
+ error_key:
+ description:
+ - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
+ type: str
+ redirect_all_requests:
+ description:
+ - "Describes the redirect behavior for every request to this s3 bucket website endpoint"
+ type: str
+ state:
+ description:
+ - "Add or remove s3 website configuration"
+ choices: [ 'present', 'absent' ]
+ required: true
+ type: str
+ suffix:
+ description:
+ - >
+ Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
+ samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
+ character.
+ default: index.html
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Configure an s3 bucket to redirect all requests to example.com
+ community.aws.s3_website:
+ name: mybucket.com
+ redirect_all_requests: example.com
+ state: present
+
+- name: Remove website configuration from an s3 bucket
+ community.aws.s3_website:
+ name: mybucket.com
+ state: absent
+
+- name: Configure an s3 bucket as a website with index and error pages
+ community.aws.s3_website:
+ name: mybucket.com
+ suffix: home.htm
+ error_key: errors/404.htm
+ state: present
+
+'''
+
+RETURN = '''
+index_document:
+ description: index document
+ type: complex
+ returned: always
+ contains:
+ suffix:
+ description: suffix that is appended to a request that is for a directory on the website endpoint
+ returned: success
+ type: str
+ sample: index.html
+error_document:
+ description: error document
+ type: complex
+ returned: always
+ contains:
+ key:
+ description: object key name to use when a 4XX class error occurs
+ returned: when error_document parameter set
+ type: str
+ sample: error.html
+redirect_all_requests_to:
+ description: where to redirect requests
+ type: complex
+ returned: always
+ contains:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when redirect all requests parameter set
+ type: str
+ sample: ansible.com
+ protocol:
+ description: protocol to use when redirecting requests.
+ returned: when redirect all requests parameter set
+ type: str
+ sample: https
+routing_rules:
+ description: routing rules
+ type: list
+ returned: always
+ contains:
+ condition:
+ type: complex
+ description: A container for describing a condition that must be met for the specified redirect to apply.
+ contains:
+ http_error_code_returned_equals:
+ description: The HTTP error code when the redirect is applied.
+ returned: always
+ type: str
+ key_prefix_equals:
+ description: object key name prefix when the redirect is applied. For example, to redirect
+ requests for ExamplePage.html, the key prefix will be ExamplePage.html
+ returned: when routing rule present
+ type: str
+ sample: docs/
+ redirect:
+ type: complex
+ description: Container for redirect information.
+ returned: always
+ contains:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when host name set as part of redirect rule
+ type: str
+ sample: ansible.com
+ http_redirect_code:
+ description: The HTTP redirect code to use on the response.
+ returned: when routing rule present
+ type: str
+ protocol:
+ description: Protocol to use when redirecting requests.
+ returned: when routing rule present
+ type: str
+ sample: http
+ replace_key_prefix_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: str
+ sample: documents/
+ replace_key_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: str
+ sample: documents/
+'''
+
+import time
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError, ParamValidationError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def _create_redirect_dict(url):
+
+ redirect_dict = {}
+ url_split = url.split(':')
+
+ # Did we split anything?
+ if len(url_split) == 2:
+ redirect_dict[u'Protocol'] = url_split[0]
+ redirect_dict[u'HostName'] = url_split[1].replace('//', '')
+ elif len(url_split) == 1:
+ redirect_dict[u'HostName'] = url_split[0]
+ else:
+ raise ValueError('Redirect URL appears invalid')
+
+ return redirect_dict
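+
+
+# Two illustrative inputs for the parser above: with a protocol, the URL is
+# split on ':' and the '//' stripped from the host; without one, the whole
+# string is treated as the host name.
+def _example_redirect_dicts():
+    assert _create_redirect_dict('https://example.com') == {
+        'Protocol': 'https', 'HostName': 'example.com'}
+    assert _create_redirect_dict('example.com') == {'HostName': 'example.com'}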
+
+
+def _create_website_configuration(suffix, error_key, redirect_all_requests):
+
+ website_configuration = {}
+
+ if error_key is not None:
+ website_configuration['ErrorDocument'] = {'Key': error_key}
+
+ if suffix is not None:
+ website_configuration['IndexDocument'] = {'Suffix': suffix}
+
+ if redirect_all_requests is not None:
+ website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
+
+ return website_configuration
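+
+
+# An illustrative call of the builder above: a suffix plus an error key yields
+# both documents, while a redirect-only configuration would carry neither.
+def _example_website_configuration():
+    assert _create_website_configuration('index.html', 'errors/404.htm', None) == {
+        'IndexDocument': {'Suffix': 'index.html'},
+        'ErrorDocument': {'Key': 'errors/404.htm'},
+    }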
+
+
+def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
+
+ bucket_name = module.params.get("name")
+ redirect_all_requests = module.params.get("redirect_all_requests")
+ # If redirect_all_requests is set then don't use the default suffix that has been set
+ if redirect_all_requests is not None:
+ suffix = None
+ else:
+ suffix = module.params.get("suffix")
+ error_key = module.params.get("error_key")
+ changed = False
+
+ try:
+ bucket_website = resource_connection.BucketWebsite(bucket_name)
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to get bucket")
+
+ try:
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ website_config = None
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get website configuration")
+
+ if website_config is None:
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json_aws(e, msg="Failed to set bucket website configuration")
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
+ (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
+ (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
+
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket website configuration")
+ except KeyError as e:
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+                module.fail_json_aws(e, msg="Failed to update bucket website configuration")
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+
+ # Wait 5 secs before getting the website_config again to give it time to update
+ time.sleep(5)
+
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
+
+
+def disable_bucket_as_website(client_connection, module):
+
+ changed = False
+ bucket_name = module.params.get("name")
+
+ try:
+ client_connection.get_bucket_website(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ module.exit_json(changed=changed)
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket website")
+
+ try:
+ client_connection.delete_bucket_website(Bucket=bucket_name)
+ changed = True
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket website")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ suffix=dict(type='str', required=False, default='index.html'),
+ error_key=dict(type='str', required=False),
+ redirect_all_requests=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['redirect_all_requests', 'suffix'],
+ ['redirect_all_requests', 'error_key']
+ ],
+ )
+
+ try:
+ client_connection = module.client('s3')
+ resource_connection = module.resource('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_or_update_bucket_as_website(client_connection, resource_connection, module)
+ elif state == 'absent':
+ disable_bucket_as_website(client_connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns.py
new file mode 100644
index 00000000..49b73aa6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: sns
+short_description: Send Amazon Simple Notification Service messages
+version_added: 1.0.0
+description:
+ - Sends a notification to a topic on your Amazon SNS account.
+author:
+ - Michael J. Schultz (@mjschultz)
+ - Paul Arthur (@flowerysong)
+options:
+ msg:
+ description:
+ - Default message for subscriptions without a more specific message.
+ required: true
+ aliases: [ "default" ]
+ type: str
+ subject:
+ description:
+      - Message subject.
+ type: str
+ topic:
+ description:
+ - The name or ARN of the topic to publish to.
+ required: true
+ type: str
+ email:
+ description:
+ - Message to send to email subscriptions.
+ type: str
+ email_json:
+ description:
+ - Message to send to email-json subscriptions.
+ type: str
+ sqs:
+ description:
+ - Message to send to SQS subscriptions.
+ type: str
+ sms:
+ description:
+ - Message to send to SMS subscriptions.
+ type: str
+ http:
+ description:
+ - Message to send to HTTP subscriptions.
+ type: str
+ https:
+ description:
+ - Message to send to HTTPS subscriptions.
+ type: str
+ application:
+ description:
+ - Message to send to application subscriptions.
+ type: str
+ lambda:
+ description:
+ - Message to send to Lambda subscriptions.
+ type: str
+ message_attributes:
+ description:
+ - Dictionary of message attributes. These are optional structured data entries to be sent along to the endpoint.
+ - This is in AWS's distinct Name/Type/Value format; see example below.
+ type: dict
+ message_structure:
+ description:
+ - The payload format to use for the message.
+ - This must be 'json' to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)).
+ - It must be 'string' to support I(message_attributes).
+ default: json
+ choices: ['json', 'string']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+
+requirements:
+ - boto3
+ - botocore
+'''
+
+EXAMPLES = """
+- name: Send default notification message via SNS
+ community.aws.sns:
+ msg: '{{ inventory_hostname }} has completed the play.'
+ subject: Deploy complete!
+ topic: deploy
+ delegate_to: localhost
+
+- name: Send notification messages via SNS with short message for SMS
+ community.aws.sns:
+ msg: '{{ inventory_hostname }} has completed the play.'
+ sms: deployed!
+ subject: Deploy complete!
+ topic: deploy
+ delegate_to: localhost
+
+- name: Send message with message_attributes
+ community.aws.sns:
+ topic: "deploy"
+ msg: "message with extra details!"
+ message_attributes:
+ channel:
+ data_type: String
+ string_value: "mychannel"
+ color:
+ data_type: String
+ string_value: "green"
+ delegate_to: localhost
+"""
+
+RETURN = """
+msg:
+ description: Human-readable diagnostic information
+ returned: always
+ type: str
+ sample: OK
+message_id:
+ description: The message ID of the submitted message
+ returned: when success
+ type: str
+ sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b
+"""
+
+import json
+import traceback
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def arn_topic_lookup(module, client, short_topic):
+ lookup_topic = ':{0}'.format(short_topic)
+
+ try:
+ paginator = client.get_paginator('list_topics')
+ topic_iterator = paginator.paginate()
+ for response in topic_iterator:
+ for topic in response['Topics']:
+ if topic['TopicArn'].endswith(lookup_topic):
+ return topic['TopicArn']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to look up topic ARN')
+
+ return None
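+
+
+# A small sketch of the suffix matching above, with a hypothetical ARN: a
+# short topic name resolves by matching ':<name>' at the end of each ARN.
+def _example_arn_suffix_match():
+    arn = 'arn:aws:sns:us-east-1:111111111111:deploy'
+    return arn.endswith(':{0}'.format('deploy'))  # True: 'deploy' resolves here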
+
+
+def main():
+ protocols = [
+ 'http',
+ 'https',
+ 'email',
+ 'email_json',
+ 'sms',
+ 'sqs',
+ 'application',
+ 'lambda',
+ ]
+
+ argument_spec = dict(
+ msg=dict(required=True, aliases=['default']),
+ subject=dict(),
+ topic=dict(required=True),
+ message_attributes=dict(type='dict'),
+ message_structure=dict(choices=['json', 'string'], default='json'),
+ )
+
+ for p in protocols:
+ argument_spec[p] = dict()
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ sns_kwargs = dict(
+ Message=module.params['msg'],
+ Subject=module.params['subject'],
+ MessageStructure=module.params['message_structure'],
+ )
+
+ if module.params['message_attributes']:
+ if module.params['message_structure'] != 'string':
+ module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
+ sns_kwargs['MessageAttributes'] = module.params['message_attributes']
+
+ dict_msg = {
+ 'default': sns_kwargs['Message']
+ }
+
+ for p in protocols:
+ if module.params[p]:
+ if sns_kwargs['MessageStructure'] != 'json':
+ module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
+ dict_msg[p.replace('_', '-')] = module.params[p]
+
+ client = module.client('sns')
+
+ topic = module.params['topic']
+ if ':' in topic:
+ # Short names can't contain ':' so we'll assume this is the full ARN
+ sns_kwargs['TopicArn'] = topic
+ else:
+ sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
+
+ if not sns_kwargs['TopicArn']:
+ module.fail_json(msg='Could not find topic: {0}'.format(topic))
+
+ if sns_kwargs['MessageStructure'] == 'json':
+ sns_kwargs['Message'] = json.dumps(dict_msg)
+
+ try:
+ result = client.publish(**sns_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to publish message')
+
+ module.exit_json(msg='OK', message_id=result['MessageId'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns_topic.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns_topic.py
new file mode 100644
index 00000000..79070cba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sns_topic.py
@@ -0,0 +1,525 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: sns_topic
+short_description: Manages AWS SNS topics and subscriptions
+version_added: 1.0.0
+description:
+ - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
+  - As of 2.6, this module can be used to subscribe to and unsubscribe from topics outside of your AWS account.
+author:
+ - "Joel Thompson (@joelthompson)"
+ - "Fernando Jose Pando (@nand0p)"
+ - "Will Thames (@willthames)"
+options:
+ name:
+ description:
+ - The name or ARN of the SNS topic to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to create or destroy an SNS topic.
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ display_name:
+ description:
+ - Display name of the topic.
+ type: str
+ policy:
+ description:
+ - Policy to apply to the SNS topic.
+ type: dict
+ delivery_policy:
+ description:
+ - Delivery policy to apply to the SNS topic.
+ type: dict
+ subscriptions:
+ description:
+ - List of subscriptions to apply to the topic. Note that AWS requires
+ subscriptions to be confirmed, so you will need to confirm any new
+ subscriptions.
+ suboptions:
+ endpoint:
+ description: Endpoint of subscription.
+ required: true
+ protocol:
+ description: Protocol of subscription.
+ required: true
+ type: list
+ elements: dict
+ default: []
+ purge_subscriptions:
+ description:
+ - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
+ allow you to purge any PendingConfirmation subscriptions, so if any
+ exist and would be purged, they are silently skipped. This means that
+ somebody could come back later and confirm the subscription. Sorry.
+ Blame Amazon."
+ default: true
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ "boto" ]
+'''
+
+EXAMPLES = r"""
+
+- name: Create alarm SNS topic
+ community.aws.sns_topic:
+ name: "alarms"
+ state: present
+ display_name: "alarm SNS topic"
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 2
+ maxDelayTarget: 4
+ numRetries: 3
+ numMaxDelayRetries: 5
+ backoffFunction: "<linear|arithmetic|geometric|exponential>"
+ disableSubscriptionOverrides: True
+ defaultThrottlePolicy:
+ maxReceivesPerSecond: 10
+ subscriptions:
+ - endpoint: "my_email_address@example.com"
+ protocol: "email"
+ - endpoint: "my_mobile_number"
+ protocol: "sms"
+
+"""
+
+RETURN = r'''
+sns_arn:
+ description: The ARN of the topic you are modifying
+ type: str
+ returned: always
+ sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
+sns_topic:
+ description: Dict of sns topic details
+ type: complex
+ returned: always
+ contains:
+ attributes_set:
+ description: list of attributes set during this run
+ returned: always
+ type: list
+ sample: []
+ check_mode:
+ description: whether check mode was on
+ returned: always
+ type: bool
+ sample: false
+ delivery_policy:
+ description: Delivery policy for the SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
+ "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
+ display_name:
+ description: Display name for SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: My topic name
+ name:
+ description: Topic name
+ returned: always
+ type: str
+ sample: ansible-test-dummy-topic
+ owner:
+ description: AWS account that owns the topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '111111111111'
+ policy:
+ description: Policy for the SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"},
+ "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
+ state:
+ description: whether the topic is present or absent
+ returned: always
+ type: str
+ sample: present
+ subscriptions:
+ description: List of subscribers to the topic in this AWS account
+ returned: always
+ type: list
+ sample: []
+ subscriptions_added:
+ description: List of subscribers added in this run
+ returned: always
+ type: list
+ sample: []
+ subscriptions_confirmed:
+ description: Count of confirmed subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_deleted:
+ description: Count of deleted subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_existing:
+ description: List of existing subscriptions
+ returned: always
+ type: list
+ sample: []
+ subscriptions_new:
+ description: List of new subscriptions
+ returned: always
+ type: list
+ sample: []
+ subscriptions_pending:
+ description: Count of pending subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_purge:
+ description: Whether or not purge_subscriptions was set
+ returned: always
+ type: bool
+ sample: true
+ topic_arn:
+ description: ARN of the SNS topic (equivalent to sns_arn)
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic
+ topic_created:
+ description: Whether the topic was created
+ returned: always
+ type: bool
+ sample: false
+ topic_deleted:
+ description: Whether the topic was deleted
+ returned: always
+ type: bool
+ sample: false
+'''
+
+import json
+import re
+import copy
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict
+
+
+class SnsTopicManager(object):
+ """ Handles SNS Topic creation and destruction """
+
+ def __init__(self,
+ module,
+ name,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ check_mode):
+
+ self.connection = module.client('sns')
+ self.module = module
+ self.name = name
+ self.state = state
+ self.display_name = display_name
+ self.policy = policy
+ self.delivery_policy = delivery_policy
+ self.subscriptions = subscriptions
+ self.subscriptions_existing = []
+ self.subscriptions_deleted = []
+ self.subscriptions_added = []
+ self.purge_subscriptions = purge_subscriptions
+ self.check_mode = check_mode
+ self.topic_created = False
+ self.topic_deleted = False
+ self.topic_arn = None
+ self.attributes_set = []
+
+ @AWSRetry.jittered_backoff()
+ def _list_topics_with_backoff(self):
+ paginator = self.connection.get_paginator('list_topics')
+ return paginator.paginate().build_full_result()['Topics']
+
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
+ def _list_topic_subscriptions_with_backoff(self):
+ paginator = self.connection.get_paginator('list_subscriptions_by_topic')
+ return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions']
+
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
+ def _list_subscriptions_with_backoff(self):
+ paginator = self.connection.get_paginator('list_subscriptions')
+ return paginator.paginate().build_full_result()['Subscriptions']
+
+ def _list_topics(self):
+ try:
+ topics = self._list_topics_with_backoff()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get topic list")
+ return [t['TopicArn'] for t in topics]
+
+ def _topic_arn_lookup(self):
+ # topic names cannot have colons, so this captures the full topic name
+ all_topics = self._list_topics()
+ lookup_topic = ':%s' % self.name
+ for topic in all_topics:
+ if topic.endswith(lookup_topic):
+ return topic
+
+ def _create_topic(self):
+ if not self.check_mode:
+ try:
+ response = self.connection.create_topic(Name=self.name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
+ self.topic_arn = response['TopicArn']
+ return True
+
+ def _compare_delivery_policies(self, policy_a, policy_b):
+ _policy_a = copy.deepcopy(policy_a)
+ _policy_b = copy.deepcopy(policy_b)
+ # AWS automatically injects disableSubscriptionOverrides if you set an
+ # http policy
+ if 'http' in policy_a:
+ if 'disableSubscriptionOverrides' not in policy_a['http']:
+ _policy_a['http']['disableSubscriptionOverrides'] = False
+ if 'http' in policy_b:
+ if 'disableSubscriptionOverrides' not in policy_b['http']:
+ _policy_b['http']['disableSubscriptionOverrides'] = False
+ comparison = (_policy_a != _policy_b)
+ return comparison
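+
+    # A minimal sketch of why the normalization above is needed, with made-up
+    # values: AWS injects disableSubscriptionOverrides into stored http
+    # policies, so a desired policy without it must still compare as unchanged.
+    def _example_delivery_policy_compare(self):
+        desired = {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3}}}
+        stored = {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3},
+                           'disableSubscriptionOverrides': False}}
+        return self._compare_delivery_policies(desired, stored)  # False: no change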
+
+ def _set_topic_attrs(self):
+ changed = False
+ try:
+ topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
+
+ if self.display_name and self.display_name != topic_attributes['DisplayName']:
+ changed = True
+ self.attributes_set.append('display_name')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
+ AttributeValue=self.display_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set display name")
+
+ if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
+ changed = True
+ self.attributes_set.append('policy')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
+ AttributeValue=json.dumps(self.policy))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set topic policy")
+
+ if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
+ self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
+ changed = True
+ self.attributes_set.append('delivery_policy')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
+ AttributeValue=json.dumps(self.delivery_policy))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
+ return changed
+
+ def _canonicalize_endpoint(self, protocol, endpoint):
+ if protocol == 'sms':
+ return re.sub('[^0-9]*', '', endpoint)
+ return endpoint
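+
+    # An illustrative case for the helper above: SMS endpoints are reduced to
+    # digits, so '+1 (555) 010-0000' and '15550100000' compare as equal. The
+    # number is made up.
+    def _example_canonicalize(self):
+        return self._canonicalize_endpoint('sms', '+1 (555) 010-0000')  # '15550100000'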
+
+ def _set_topic_subs(self):
+ changed = False
+ subscriptions_existing_list = set()
+ desired_subscriptions = [(sub['protocol'],
+ self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
+ self.subscriptions]
+
+ for sub in self._list_topic_subscriptions():
+ sub_key = (sub['Protocol'], sub['Endpoint'])
+ subscriptions_existing_list.add(sub_key)
+ if (self.purge_subscriptions and sub_key not in desired_subscriptions and
+ sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
+ changed = True
+ self.subscriptions_deleted.append(sub_key)
+ if not self.check_mode:
+ try:
+ self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
+
+ for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
+ changed = True
+ self.subscriptions_added.append((protocol, endpoint))
+ if not self.check_mode:
+ try:
+ self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
+ return changed
+
+ def _list_topic_subscriptions(self):
+ try:
+ return self._list_topic_subscriptions_with_backoff()
+ except is_boto3_error_code('AuthorizationError'):
+ try:
+ # potentially AuthorizationError when listing subscriptions for third party topic
+ return [sub for sub in self._list_subscriptions_with_backoff()
+ if sub['TopicArn'] == self.topic_arn]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
+
+ def _delete_subscriptions(self):
+ # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
+ # https://forums.aws.amazon.com/thread.jspa?threadID=85993
+ subscriptions = self._list_topic_subscriptions()
+ if not subscriptions:
+ return False
+ for sub in subscriptions:
+ if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
+ self.subscriptions_deleted.append(sub['SubscriptionArn'])
+ if not self.check_mode:
+ try:
+ self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
+ return True
+
+ def _delete_topic(self):
+ self.topic_deleted = True
+ if not self.check_mode:
+ try:
+ self.connection.delete_topic(TopicArn=self.topic_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
+ return True
+
+ def _name_is_arn(self):
+ return self.name.startswith('arn:')
+
+ def ensure_ok(self):
+ changed = False
+ if self._name_is_arn():
+ self.topic_arn = self.name
+ else:
+ self.topic_arn = self._topic_arn_lookup()
+ if not self.topic_arn:
+ changed = self._create_topic()
+ if self.topic_arn in self._list_topics():
+ changed |= self._set_topic_attrs()
+ elif self.display_name or self.policy or self.delivery_policy:
+ self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
+ changed |= self._set_topic_subs()
+ return changed
+
+ def ensure_gone(self):
+ changed = False
+ if self._name_is_arn():
+ self.topic_arn = self.name
+ else:
+ self.topic_arn = self._topic_arn_lookup()
+ if self.topic_arn:
+ if self.topic_arn not in self._list_topics():
+ self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
+ changed = self._delete_subscriptions()
+ changed |= self._delete_topic()
+ return changed
+
+ def get_info(self):
+ info = {
+ 'name': self.name,
+ 'state': self.state,
+ 'subscriptions_new': self.subscriptions,
+ 'subscriptions_existing': self.subscriptions_existing,
+ 'subscriptions_deleted': self.subscriptions_deleted,
+ 'subscriptions_added': self.subscriptions_added,
+ 'subscriptions_purge': self.purge_subscriptions,
+ 'check_mode': self.check_mode,
+ 'topic_created': self.topic_created,
+ 'topic_deleted': self.topic_deleted,
+ 'attributes_set': self.attributes_set,
+ }
+ if self.state != 'absent':
+ if self.topic_arn in self._list_topics():
+ info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']))
+ info['delivery_policy'] = info.pop('effective_delivery_policy')
+ info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()]
+
+ return info
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ display_name=dict(),
+ policy=dict(type='dict'),
+ delivery_policy=dict(type='dict'),
+ subscriptions=dict(default=[], type='list', elements='dict'),
+ purge_subscriptions=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ display_name = module.params.get('display_name')
+ policy = module.params.get('policy')
+ delivery_policy = module.params.get('delivery_policy')
+ subscriptions = module.params.get('subscriptions')
+ purge_subscriptions = module.params.get('purge_subscriptions')
+ check_mode = module.check_mode
+
+ sns_topic = SnsTopicManager(module,
+ name,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ check_mode)
+
+ if state == 'present':
+ changed = sns_topic.ensure_ok()
+
+ elif state == 'absent':
+ changed = sns_topic.ensure_gone()
+
+ sns_facts = dict(changed=changed,
+ sns_arn=sns_topic.topic_arn,
+ sns_topic=sns_topic.get_info())
+
+ module.exit_json(**sns_facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sqs_queue.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sqs_queue.py
new file mode 100644
index 00000000..5d659679
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sqs_queue.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sqs_queue
+version_added: 1.0.0
+short_description: Creates or deletes AWS SQS queues
+description:
+ - Create or delete AWS SQS queues.
+ - Update attributes on existing queues.
+author:
+ - Alan Loi (@loia)
+ - Fernando Jose Pando (@nand0p)
+ - Nadir Lloret (@nadirollo)
+ - Dennis Podkovyrin (@sbj-ss)
+requirements:
+ - boto3
+options:
+ state:
+ description:
+ - Create or delete the queue.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the queue.
+ required: true
+ type: str
+ queue_type:
+ description:
+ - Standard or FIFO queue.
+ - I(queue_type) can only be set at queue creation and will otherwise be
+ ignored.
+ choices: ['standard', 'fifo']
+ default: 'standard'
+ type: str
+ visibility_timeout:
+ description:
+ - The default visibility timeout in seconds.
+ aliases: [default_visibility_timeout]
+ type: int
+ message_retention_period:
+ description:
+ - The message retention period in seconds.
+ type: int
+ maximum_message_size:
+ description:
+ - The maximum message size in bytes.
+ type: int
+ delay_seconds:
+ description:
+ - The delivery delay in seconds.
+ aliases: [delivery_delay]
+ type: int
+ receive_message_wait_time_seconds:
+ description:
+ - The receive message wait time in seconds.
+ aliases: [receive_message_wait_time]
+ type: int
+ policy:
+ description:
+ - The JSON dict policy to attach to queue.
+ type: dict
+ redrive_policy:
+ description:
+ - JSON dict with the redrive_policy (see example).
+ type: dict
+ kms_master_key_id:
+ description:
+ - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+ type: str
+ kms_data_key_reuse_period_seconds:
+ description:
+ - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+ aliases: [kms_data_key_reuse_period]
+ type: int
+ content_based_deduplication:
+ type: bool
+ description:
+      - Enables content-based deduplication. Used for FIFO queues only.
+ - Defaults to C(false).
+ tags:
+ description:
+ - Tag dict to apply to the queue (requires botocore 1.5.40 or above).
+ - To remove all tags set I(tags={}) and I(purge_tags=true).
+ type: dict
+ purge_tags:
+ description:
+ - Remove tags not listed in I(tags).
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+RETURN = '''
+content_based_deduplication:
+    description: Enables content-based deduplication. Used for FIFO queues only.
+ type: bool
+ returned: always
+ sample: True
+visibility_timeout:
+ description: The default visibility timeout in seconds.
+ type: int
+ returned: always
+ sample: 30
+delay_seconds:
+ description: The delivery delay in seconds.
+ type: int
+ returned: always
+ sample: 0
+kms_master_key_id:
+ description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+ type: str
+ returned: always
+ sample: alias/MyAlias
+kms_data_key_reuse_period_seconds:
+ description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+ type: int
+ returned: always
+ sample: 300
+maximum_message_size:
+ description: The maximum message size in bytes.
+ type: int
+ returned: always
+ sample: 262144
+message_retention_period:
+ description: The message retention period in seconds.
+ type: int
+ returned: always
+ sample: 345600
+name:
+    description: Name of the SQS queue.
+ type: str
+ returned: always
+ sample: "queuename-987d2de0"
+queue_arn:
+ description: The queue's Amazon resource name (ARN).
+ type: str
+ returned: on success
+ sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
+queue_url:
+    description: URL to access the queue.
+ type: str
+ returned: on success
+ sample: 'https://queue.amazonaws.com/123456789012/MyQueue'
+receive_message_wait_time_seconds:
+ description: The receive message wait time in seconds.
+ type: int
+ returned: always
+ sample: 0
+region:
+    description: Region in which the queue was created.
+ type: str
+ returned: always
+ sample: 'us-east-1'
+tags:
+    description: Dict of queue tags.
+ type: dict
+ returned: always
+ sample: '{"Env": "prod"}'
+'''
+
+EXAMPLES = '''
+- name: Create SQS queue with redrive policy
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ default_visibility_timeout: 120
+ message_retention_period: 86400
+ maximum_message_size: 1024
+ delivery_delay: 30
+ receive_message_wait_time: 20
+ policy: "{{ json_dict }}"
+ redrive_policy:
+ maxReceiveCount: 5
+ deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
+
+- name: Drop redrive policy
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ redrive_policy: {}
+
+- name: Create FIFO queue
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ queue_type: fifo
+ content_based_deduplication: yes
+
+- name: Tag queue
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ tags:
+ example: SomeValue
+
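+# Illustrative sketch, based on the tags/purge_tags options documented above:
+- name: Remove all tags from the queue
+  community.aws.sqs_queue:
+    name: fifo-queue
+    region: ap-southeast-2
+    tags: {}
+    purge_tags: true
+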
+- name: Configure Encryption, automatically uses a new data key every hour
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ kms_master_key_id: alias/MyQueueKey
+ kms_data_key_reuse_period_seconds: 3600
+
+- name: Delete SQS queue
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ state: absent
+'''
+
+import json
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
+ camel_dict_to_snake_dict,
+ compare_aws_tags,
+ snake_dict_to_camel_dict,
+ compare_policies,
+ )
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def get_queue_name(module, is_fifo=False):
+ name = module.params.get('name')
+ if not is_fifo or name.endswith('.fifo'):
+ return name
+ return name + '.fifo'
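+# Illustration only: with queue_type=fifo, the name 'orders' maps to
+# 'orders.fifo', while 'orders.fifo' is left untouched; standard queues
+# always keep their name as given.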
+
+
+# NonExistentQueue is explicitly expected when a queue doesn't exist
+@AWSRetry.jittered_backoff()
+def get_queue_url(client, name):
+ try:
+ return client.get_queue_url(QueueName=name)['QueueUrl']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
+ return None
+ raise
+
+
+def describe_queue(client, queue_url):
+ """
+    Describe a queue's attributes, converted to snake_case.
+ """
+ attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ description = dict(attributes)
+ description.pop('Policy', None)
+ description.pop('RedrivePolicy', None)
+ description = camel_dict_to_snake_dict(description)
+ description['policy'] = attributes.get('Policy', None)
+ description['redrive_policy'] = attributes.get('RedrivePolicy', None)
+
+ # Boto3 returns everything as a string, convert them back to integers/dicts if
+ # that's what we expected.
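+    # e.g. {'VisibilityTimeout': '30'} becomes {'visibility_timeout': 30},
+    # while 'policy'/'redrive_policy' are parsed from their JSON strings.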
+ for key, value in description.items():
+ if value is None:
+ continue
+
+ if key in ['policy', 'redrive_policy']:
+ policy = json.loads(value)
+ description[key] = policy
+ continue
+
+        if key == 'content_based_deduplication':
+            # SQS returns this attribute as the string 'true'/'false'; bool()
+            # on any non-empty string is True, so test the text instead.
+            description[key] = (str(value).lower() == 'true')
+            continue
+
+ try:
+ if value == str(int(value)):
+ description[key] = int(value)
+ except (TypeError, ValueError):
+ pass
+
+ return description
+
+
+def create_or_update_sqs_queue(client, module):
+ is_fifo = (module.params.get('queue_type') == 'fifo')
+ queue_name = get_queue_name(module, is_fifo)
+ result = dict(
+ name=queue_name,
+ region=module.params.get('region'),
+ changed=False,
+ )
+
+ queue_url = get_queue_url(client, queue_name)
+ result['queue_url'] = queue_url
+
+ if not queue_url:
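+        # FifoQueue can only be passed at creation time; for an existing
+        # queue the queue_type parameter is ignored (as documented above).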
+ create_attributes = {'FifoQueue': 'true'} if is_fifo else {}
+ result['changed'] = True
+ if module.check_mode:
+ return result
+ queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
+
+ changed, arn = update_sqs_queue(module, client, queue_url)
+ result['changed'] |= changed
+ result['queue_arn'] = arn
+
+ changed, tags = update_tags(client, queue_url, module)
+ result['changed'] |= changed
+ result['tags'] = tags
+
+ result.update(describe_queue(client, queue_url))
+
+    COMPATIBILITY_KEYS = dict(
+ delay_seconds='delivery_delay',
+ receive_message_wait_time_seconds='receive_message_wait_time',
+ visibility_timeout='default_visibility_timeout',
+ kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
+ )
+ for key in list(result.keys()):
+
+ # The return values changed between boto and boto3, add the old keys too
+ # for backwards compatibility
+        return_name = COMPATIBILITY_KEYS.get(key)
+ if return_name:
+ result[return_name] = result.get(key)
+
+ return result
+
+
+def update_sqs_queue(module, client, queue_url):
+ check_mode = module.check_mode
+ changed = False
+ existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
+ attributes_to_set = dict()
+
+ # Boto3 SQS deals with policies as strings, we want to deal with them as
+ # dicts
+ if module.params.get('policy') is not None:
+ policy = module.params.get('policy')
+ current_value = existing_attributes.get('Policy', '{}')
+ current_policy = json.loads(current_value)
+ if compare_policies(current_policy, policy):
+ attributes_to_set['Policy'] = json.dumps(policy)
+ changed = True
+ if module.params.get('redrive_policy') is not None:
+ policy = module.params.get('redrive_policy')
+ current_value = existing_attributes.get('RedrivePolicy', '{}')
+ current_policy = json.loads(current_value)
+ if compare_policies(current_policy, policy):
+ attributes_to_set['RedrivePolicy'] = json.dumps(policy)
+ changed = True
+
+ for attribute, value in existing_attributes.items():
+ # We handle these as a special case because they're IAM policies
+ if attribute in ['Policy', 'RedrivePolicy']:
+ continue
+
+ if attribute not in new_attributes.keys():
+ continue
+
+ if new_attributes.get(attribute) is None:
+ continue
+
+ new_value = new_attributes[attribute]
+
+        if isinstance(new_value, bool):
+            new_value = str(new_value).lower()
+            value = str(value).lower()
+
+        # Compare as strings, since boto3 returns every attribute as a string.
+        if str(new_value) == str(value):
+            continue
+
+ # Boto3 expects strings
+ attributes_to_set[attribute] = str(new_value)
+ changed = True
+
+ if changed and not check_mode:
+ client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
+
+    return changed, existing_attributes.get('QueueArn')
+
+
+def delete_sqs_queue(client, module):
+ is_fifo = (module.params.get('queue_type') == 'fifo')
+ queue_name = get_queue_name(module, is_fifo)
+ result = dict(
+ name=queue_name,
+ region=module.params.get('region'),
+ changed=False
+ )
+
+ queue_url = get_queue_url(client, queue_name)
+ if not queue_url:
+ return result
+
+    result['changed'] = True
+ if not module.check_mode:
+ AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
+
+ return result
+
+
+def update_tags(client, queue_url, module):
+ new_tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ if new_tags is None:
+ return False, {}
+
+ try:
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
+    except (ClientError, KeyError):
+ existing_tags = {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ if tags_to_remove:
+ client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+            client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add, aws_retry=True)
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
+ else:
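+        # Check mode: approximate the resulting tags with the requested set.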
+ existing_tags = new_tags
+
+ changed = bool(tags_to_remove) or bool(tags_to_add)
+ return changed, existing_tags
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
+ delay_seconds=dict(type='int', aliases=['delivery_delay']),
+ maximum_message_size=dict(type='int'),
+ message_retention_period=dict(type='int'),
+ policy=dict(type='dict'),
+ receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
+ redrive_policy=dict(type='dict'),
+ visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
+ kms_master_key_id=dict(type='str'),
+ kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period']),
+ content_based_deduplication=dict(type='bool'),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ state = module.params.get('state')
+ retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
+ try:
+ client = module.client('sqs', retry_decorator=retry_decorator)
+ if state == 'present':
+ result = create_or_update_sqs_queue(client, module)
+ elif state == 'absent':
+ result = delete_sqs_queue(client, module)
+ except (BotoCoreError, ClientError, ParamValidationError) as e:
+        module.fail_json_aws(e, msg='Failed to manage SQS queue')
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_assume_role.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
new file mode 100644
index 00000000..378eb003
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+version_added: 1.0.0
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials.
+author:
+ - Boris Ekelchik (@bekelchik)
+ - Marek Piatek (@piontas)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is
+ assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
+ required: true
+ type: str
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail.
+ required: true
+ type: str
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ type: str
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
+      - The maximum depends on the IAM role's maximum session duration setting.
+ - By default, the value is set to 3600 seconds.
+ type: int
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ type: str
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ type: str
+notes:
+  - To use the assumed role in a subsequent playbook task you must pass the I(access_key), I(secret_key) and I(session_token) from C(sts_creds).
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+ - python >= 2.6
+'''
+
+RETURN = '''
+sts_creds:
+ description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
+ returned: always
+ type: dict
+ sample:
+ access_key: XXXXXXXXXXXXXXXXXXXX
+ expiration: '2017-11-11T11:11:11+00:00'
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+sts_user:
+ description: The Amazon Resource Name (ARN) and the assumed role ID
+ returned: always
+ type: dict
+ sample:
+      assumed_role_id: ARO123EXAMPLE123:Bob
+      arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+- community.aws.sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+- amazon.aws.ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def _parse_response(response):
+ credentials = response.get('Credentials', {})
+ user = response.get('AssumedRoleUser', {})
+
+ sts_cred = {
+ 'access_key': credentials.get('AccessKeyId'),
+ 'secret_key': credentials.get('SecretAccessKey'),
+ 'session_token': credentials.get('SessionToken'),
+ 'expiration': credentials.get('Expiration')
+
+ }
+ sts_user = camel_dict_to_snake_dict(user)
+ return sts_cred, sts_user
+
+
+def assume_role_policy(connection, module):
+ params = {
+ 'RoleArn': module.params.get('role_arn'),
+ 'RoleSessionName': module.params.get('role_session_name'),
+ 'Policy': module.params.get('policy'),
+ 'DurationSeconds': module.params.get('duration_seconds'),
+ 'ExternalId': module.params.get('external_id'),
+ 'SerialNumber': module.params.get('mfa_serial_number'),
+ 'TokenCode': module.params.get('mfa_token')
+ }
+ changed = False
+
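+    # Strip unset parameters so boto3 receives only user-supplied arguments
+    # and applies the service-side defaults for the rest.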
+ kwargs = dict((k, v) for k, v in params.items() if v is not None)
+
+ try:
+ response = connection.assume_role(**kwargs)
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json_aws(e)
+
+ sts_cred, sts_user = _parse_response(response)
+ module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
+
+
+def main():
+ argument_spec = dict(
+ role_arn=dict(required=True),
+ role_session_name=dict(required=True),
+ duration_seconds=dict(required=False, default=None, type='int'),
+ external_id=dict(required=False, default=None),
+ policy=dict(required=False, default=None),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ connection = module.client('sts')
+
+ assume_role_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_session_token.py b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_session_token.py
new file mode 100644
index 00000000..4183b976
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/aws/plugins/modules/sts_session_token.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sts_session_token
+version_added: 1.0.0
+short_description: Obtain a session token from the AWS Security Token Service
+description:
+ - Obtain a session token from the AWS Security Token Service.
+author: Victor Costan (@pwnall)
+options:
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the session token.
+ See U(https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters)
+ for acceptable and default values.
+ type: int
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the user requires MFA.
+ type: str
+notes:
+  - To use the session token in a subsequent playbook task you must pass the I(access_key), I(secret_key) and I(session_token).
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto3
+ - botocore
+ - python >= 2.6
+'''
+
+RETURN = """
+sts_creds:
+ description: The Credentials object returned by the AWS Security Token Service
+ returned: always
+  type: dict
+ sample:
+ access_key: ASXXXXXXXXXXXXXXXXXX
+ expiration: "2016-04-08T11:59:47+00:00"
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+- name: Get a session token
+ community.aws.sts_session_token:
+ duration_seconds: 3600
+ register: session_credentials
+
+- name: Use the session token obtained above to tag an instance in account 123456789012
+ amazon.aws.ec2_tag:
+ aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+ aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+ security_token: "{{ session_credentials.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+try:
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def normalize_credentials(credentials):
+ access_key = credentials.get('AccessKeyId', None)
+ secret_key = credentials.get('SecretAccessKey', None)
+ session_token = credentials.get('SessionToken', None)
+ expiration = credentials.get('Expiration', None)
+ return {
+ 'access_key': access_key,
+ 'secret_key': secret_key,
+ 'session_token': session_token,
+ 'expiration': expiration
+ }
+
+
+def get_session_token(connection, module):
+ duration_seconds = module.params.get('duration_seconds')
+ mfa_serial_number = module.params.get('mfa_serial_number')
+ mfa_token = module.params.get('mfa_token')
+ changed = False
+
+ args = {}
+ if duration_seconds is not None:
+ args['DurationSeconds'] = duration_seconds
+ if mfa_serial_number is not None:
+ args['SerialNumber'] = mfa_serial_number
+ if mfa_token is not None:
+ args['TokenCode'] = mfa_token
+
+ try:
+ response = connection.get_session_token(**args)
+ changed = True
+ except ClientError as e:
+        module.fail_json_aws(e, msg='Failed to get session token')
+
+ credentials = normalize_credentials(response.get('Credentials', {}))
+ module.exit_json(changed=changed, sts_creds=credentials)
+
+
+def main():
+ argument_spec = dict(
+ duration_seconds=dict(required=False, default=None, type='int'),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ try:
+ connection = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ get_session_token(connection, module)
+
+
+if __name__ == '__main__':
+ main()