author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit     975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree       89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/community/aws/plugins
parent     Initial commit. (diff)
download   ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
           ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg. (upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/aws/plugins')
-rw-r--r--  ansible_collections/community/aws/plugins/connection/__init__.py  0
-rw-r--r--  ansible_collections/community/aws/plugins/connection/aws_ssm.py  928
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/base.py  376
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/ec2.py  189
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/etag.py  62
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/networkfirewall.py  1762
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/opensearch.py  280
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/sns.py  170
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/transitgateway.py  345
-rw-r--r--  ansible_collections/community/aws/plugins/module_utils/wafv2.py  206
-rw-r--r--  ansible_collections/community/aws/plugins/modules/__init__.py  0
-rw-r--r--  ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py  237
-rw-r--r--  ansible_collections/community/aws/plugins/modules/acm_certificate.py  567
-rw-r--r--  ansible_collections/community/aws/plugins/modules/acm_certificate_info.py  295
-rw-r--r--  ansible_collections/community/aws/plugins/modules/api_gateway.py  368
-rw-r--r--  ansible_collections/community/aws/plugins/modules/api_gateway_domain.py  336
-rw-r--r--  ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py  539
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py  102
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py  270
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py  221
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py  694
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py  215
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py  225
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py  299
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_policy.py  607
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py  324
-rw-r--r--  ansible_collections/community/aws/plugins/modules/aws_region_info.py  98
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_compute_environment.py  483
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_job_definition.py  458
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_job_queue.py  308
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py  84
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py  753
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py  2272
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py  646
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py  270
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py  270
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py  292
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codebuild_project.py  488
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codecommit_repository.py  244
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codepipeline.py  308
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py  159
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_aggregator.py  232
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_delivery_channel.py  218
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_recorder.py  212
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_rule.py  275
-rw-r--r--  ansible_collections/community/aws/plugins/modules/data_pipeline.py  634
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py  158
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_connection.py  345
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_gateway.py  368
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py  466
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py  519
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dms_endpoint.py  698
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py  225
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dynamodb_table.py  1087
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py  157
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py  223
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py  255
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py  137
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_launch_template.py  827
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_placement_group.py  251
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py  126
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py  186
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py  512
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py  258
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py  336
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py  198
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py  191
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py  608
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py  216
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py  590
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py  256
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py  529
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py  189
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py  803
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py  216
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_win_password.py  217
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_attribute.py  302
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_cluster.py  359
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_ecr.py  626
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_service.py  1253
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_service_info.py  247
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_tag.py  223
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_task.py  481
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py  1153
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py  380
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs.py  786
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs_info.py  392
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs_tag.py  183
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_cluster.py  305
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py  353
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_nodegroup.py  713
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache.py  549
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_info.py  509
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py  340
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py  211
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py  256
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py  224
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py  234
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_instance.py  399
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_network_lb.py  496
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target.py  334
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_group.py  992
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_group_info.py  319
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_info.py  430
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_connection.py  393
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_crawler.py  426
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_job.py  484
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_access_key.py  317
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_access_key_info.py  128
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_group.py  433
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_managed_policy.py  371
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py  104
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_password_policy.py  213
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_role.py  736
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_role_info.py  282
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_saml_federation.py  248
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_server_certificate.py  397
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py  161
-rw-r--r--  ansible_collections/community/aws/plugins/modules/inspector_target.py  246
-rw-r--r--  ansible_collections/community/aws/plugins/modules/kinesis_stream.py  1262
-rw-r--r--  ansible_collections/community/aws/plugins/modules/lightsail.py  340
-rw-r--r--  ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py  149
-rw-r--r--  ansible_collections/community/aws/plugins/modules/msk_cluster.py  848
-rw-r--r--  ansible_collections/community/aws/plugins/modules/msk_config.py  313
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall.py  352
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_info.py  237
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py  437
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py  260
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py  831
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py  449
-rw-r--r--  ansible_collections/community/aws/plugins/modules/opensearch.py  1501
-rw-r--r--  ansible_collections/community/aws/plugins/modules/opensearch_info.py  531
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift.py  673
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py  202
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_info.py  348
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py  273
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_bucket_info.py  620
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py  410
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_cors.py  170
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_lifecycle.py  667
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_logging.py  216
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py  223
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_sync.py  538
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_website.py  321
-rw-r--r--  ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py  649
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_identity.py  544
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_identity_policy.py  200
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_rule_set.py  252
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns.py  252
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns_topic.py  697
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns_topic_info.py  173
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sqs_queue.py  525
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ssm_parameter.py  597
-rw-r--r--  ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py  221
-rw-r--r--  ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py  196
-rw-r--r--  ansible_collections/community/aws/plugins/modules/storagegateway_info.py  362
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sts_assume_role.py  172
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sts_session_token.py  142
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_condition.py  742
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_info.py  144
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_rule.py  357
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_web_acl.py  364
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py  360
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py  151
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_resources.py  176
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py  125
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py  438
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py  164
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py  583
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py  155
170 files changed, 68368 insertions, 0 deletions
diff --git a/ansible_collections/community/aws/plugins/connection/__init__.py b/ansible_collections/community/aws/plugins/connection/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/connection/__init__.py
diff --git a/ansible_collections/community/aws/plugins/connection/aws_ssm.py b/ansible_collections/community/aws/plugins/connection/aws_ssm.py
new file mode 100644
index 000000000..68d761c9d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/connection/aws_ssm.py
@@ -0,0 +1,928 @@
+# Based on the ssh connection plugin by Michael DeHaan
+#
+# Copyright: (c) 2018, Pat Sharkey <psharkey@cleo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+- Pat Sharkey (@psharkey) <psharkey@cleo.com>
+- HanumanthaRao MVL (@hanumantharaomvl) <hanumanth@flux7.com>
+- Gaurav Ashtikar (@gau1991) <gaurav.ashtikar@flux7.com>
+name: aws_ssm
+short_description: execute via AWS Systems Manager
+description:
+- This connection plugin allows Ansible to execute tasks on an EC2 instance via the AWS SSM CLI.
+requirements:
+- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).
+- The control machine must have the AWS Session Manager plugin installed.
+- The remote EC2 Linux instance must have curl installed.
+options:
+ access_key_id:
+ description: The STS access key to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_access_key_id
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ version_added: 1.3.0
+ secret_access_key:
+ description: The STS secret key to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_secret_access_key
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ version_added: 1.3.0
+ session_token:
+ description: The STS session token to use when connecting via session-manager.
+ vars:
+ - name: ansible_aws_ssm_session_token
+ env:
+ - name: AWS_SESSION_TOKEN
+ version_added: 1.3.0
+ instance_id:
+ description: The EC2 instance ID.
+ vars:
+ - name: ansible_aws_ssm_instance_id
+ region:
+ description: The region the EC2 instance is located in.
+ vars:
+ - name: ansible_aws_ssm_region
+ env:
+ - name: AWS_REGION
+ - name: AWS_DEFAULT_REGION
+ default: 'us-east-1'
+ bucket_name:
+ description: The name of the S3 bucket used for file transfers.
+ vars:
+ - name: ansible_aws_ssm_bucket_name
+ bucket_endpoint_url:
+ description: The S3 endpoint URL of the bucket used for file transfers.
+ vars:
+ - name: ansible_aws_ssm_bucket_endpoint_url
+ version_added: 5.3.0
+ plugin:
+ description: This defines the location of the session-manager-plugin binary.
+ vars:
+ - name: ansible_aws_ssm_plugin
+ default: '/usr/local/bin/session-manager-plugin'
+ profile:
+ description: Sets AWS profile to use.
+ vars:
+ - name: ansible_aws_ssm_profile
+ env:
+ - name: AWS_PROFILE
+ version_added: 1.5.0
+ reconnection_retries:
+ description: Number of attempts to connect.
+ default: 3
+ type: integer
+ vars:
+ - name: ansible_aws_ssm_retries
+ ssm_timeout:
+ description: Connection timeout in seconds.
+ default: 60
+ type: integer
+ vars:
+ - name: ansible_aws_ssm_timeout
+ bucket_sse_mode:
+ description: Server-side encryption mode to use for uploads on the S3 bucket used for file transfer.
+ choices: [ 'AES256', 'aws:kms' ]
+ required: false
+ version_added: 2.2.0
+ vars:
+ - name: ansible_aws_ssm_bucket_sse_mode
+ bucket_sse_kms_key_id:
+ description: KMS key id to use when encrypting objects using C(bucket_sse_mode=aws:kms). Ignored otherwise.
+ version_added: 2.2.0
+ vars:
+ - name: ansible_aws_ssm_bucket_sse_kms_key_id
+ ssm_document:
+ description: SSM document to use when connecting.
+ vars:
+ - name: ansible_aws_ssm_document
+ version_added: 5.2.0
+ s3_addressing_style:
+ description:
+ - The addressing style to use when using S3 URLs.
+ - When the S3 bucket isn't in the same region as the instance,
+ explicitly setting the addressing style to 'virtual' may be necessary, as
+ this forces the use of a region-specific endpoint; see
+ U(https://repost.aws/knowledge-center/s3-http-307-response).
+ choices: [ 'path', 'virtual', 'auto' ]
+ default: 'auto'
+ version_added: 5.2.0
+ vars:
+ - name: ansible_aws_ssm_s3_addressing_style
+'''
+
+EXAMPLES = r'''
+
+# Wait for SSM Agent to be available on the Instance
+- name: Wait for connection to be available
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-west-2
+ # When the S3 bucket isn't in the same region as the Instance
+ # Explicitly setting the addressing style to 'virtual' may be necessary
+ # https://repost.aws/knowledge-center/s3-http-307-response
+ ansible_aws_ssm_s3_addressing_style: virtual
+ tasks:
+ - name: Wait for connection
+ wait_for_connection:
+
+# Stop Spooler Process on Windows Instances
+- name: Stop Spooler Service on Windows Instances
+ vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Stop spooler service
+ win_service:
+ name: spooler
+ state: stopped
+
+# Install a Nginx Package on Linux Instance
+- name: Install a Nginx Package
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-west-2
+ tasks:
+ - name: Install a Nginx Package
+ yum:
+ name: nginx
+ state: present
+
+# Create a directory in Windows Instances
+- name: Create a directory in Windows Instance
+ vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Create a Directory
+ win_file:
+ path: C:\Windows\temp
+ state: directory
+
+# Making use of Dynamic Inventory Plugin
+# =======================================
+# aws_ec2.yml (Dynamic Inventory - Linux)
+# This will return the Instance IDs matching the filter
+#plugin: aws_ec2
+#regions:
+# - us-east-1
+#hostnames:
+# - instance-id
+#filters:
+# tag:SSMTag: ssmlinux
+# -----------------------
+- name: install aws-cli
+ hosts: all
+ gather_facts: false
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: aws-cli
+ raw: yum install -y awscli
+ tags: aws-cli
+# Execution: ansible-playbook linux.yaml -i aws_ec2.yml
+# The playbook tasks will be executed on the instance IDs returned by the dynamic inventory plugin, using the SSM connection.
+# =====================================================
+# aws_ec2.yml (Dynamic Inventory - Windows)
+#plugin: aws_ec2
+#regions:
+# - us-east-1
+#hostnames:
+# - instance-id
+#filters:
+# tag:SSMTag: ssmwindows
+# -----------------------
+- name: Create a dir.
+ hosts: all
+ gather_facts: false
+ vars:
+ ansible_connection: aws_ssm
+ ansible_shell_type: powershell
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-east-1
+ tasks:
+ - name: Create the directory
+ win_file:
+ path: C:\Temp\SSM_Testing5
+ state: directory
+# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml
+# The playbook tasks will be executed on the instance IDs returned by the dynamic inventory plugin, using the SSM connection.
+
+# Install a Nginx Package on Linux Instance; with specific SSE for file transfer
+- name: Install a Nginx Package
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-west-2
+ ansible_aws_ssm_bucket_sse_mode: 'aws:kms'
+ ansible_aws_ssm_bucket_sse_kms_key_id: alias/kms-key-alias
+ tasks:
+ - name: Install a Nginx Package
+ yum:
+ name: nginx
+ state: present
+
+# Install a Nginx Package on Linux Instance; with dedicated SSM document
+- name: Install a Nginx Package
+ vars:
+ ansible_connection: aws_ssm
+ ansible_aws_ssm_bucket_name: nameofthebucket
+ ansible_aws_ssm_region: us-west-2
+ ansible_aws_ssm_document: nameofthecustomdocument
+ tasks:
+ - name: Install a Nginx Package
+ yum:
+ name: nginx
+ state: present
+'''
+
+import os
+import getpass
+import json
+import pty
+import random
+import re
+import select
+import string
+import subprocess
+import time
+
+try:
+ import boto3
+ from botocore.client import Config
+except ImportError as e:
+ pass
+
+from functools import wraps
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+from ansible.errors import AnsibleConnectionFailure
+from ansible.errors import AnsibleError
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils._text import to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _common_args
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _ssm_retry(func):
+ """
+ Decorator to retry in the case of a connection failure
+ Will retry if:
+ * an exception is caught
+ Will not retry if
+ * remaining_tries is <2
+ * retries limit reached
+ """
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ remaining_tries = int(self.get_option('reconnection_retries')) + 1
+ cmd_summary = f"{args[0]}..."
+ for attempt in range(remaining_tries):
+ try:
+ return_tuple = func(self, *args, **kwargs)
+ self._vvvv(f"ssm_retry: (success) {to_text(return_tuple)}")
+ break
+
+ except (AnsibleConnectionFailure, Exception) as e:
+ if attempt == remaining_tries - 1:
+ raise
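+ # Exponential backoff between attempts: pauses of 0s, 1s, 3s, 7s, ... capped at 30s.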
+ pause = 2 ** attempt - 1
+ pause = min(pause, 30)
+
+ if isinstance(e, AnsibleConnectionFailure):
+ msg = f"ssm_retry: attempt: {attempt}, cmd ({cmd_summary}), pausing for {pause} seconds"
+ else:
+ msg = f"ssm_retry: attempt: {attempt}, caught exception({e}) from cmd ({cmd_summary}), pausing for {pause} seconds"
+
+ self._vv(msg)
+
+ time.sleep(pause)
+
+ # Do not attempt to reuse the existing session on retries
+ # This will cause the SSM session to be completely restarted,
+ # as well as reinitializing the boto3 clients
+ self.close()
+
+ continue
+
+ return return_tuple
+ return wrapped
+
+
+def chunks(lst, n):
+ """Yield successive n-sized chunks from lst."""
+ for i in range(0, len(lst), n):
+ yield lst[i:i + n]
+
+
+class Connection(ConnectionBase):
+ ''' AWS SSM based connections '''
+
+ transport = 'community.aws.aws_ssm'
+ allow_executable = False
+ allow_extras = True
+ has_pipelining = False
+ is_windows = False
+ _client = None
+ _s3_client = None
+ _session = None
+ _stdout = None
+ _session_id = ''
+ _timeout = False
+ MARK_LENGTH = 26
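+ # Length of the random alphabetic markers used to delimit each command's output.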
+
+ def _display(self, f, message):
+ if self.host:
+ host_args = {"host": self.host}
+ else:
+ host_args = {}
+ f(to_text(message), **host_args)
+
+ def _v(self, message):
+ self._display(display.v, message)
+
+ def _vv(self, message):
+ self._display(display.vv, message)
+
+ def _vvv(self, message):
+ self._display(display.vvv, message)
+
+ def _vvvv(self, message):
+ self._display(display.vvvv, message)
+
+ def _get_bucket_endpoint(self):
+ """
+ Fetches the correct S3 endpoint and region for use with our bucket.
+ If we don't explicitly set the endpoint then some commands will use the global
+ endpoint and fail
+ (new AWS regions and new buckets in a region other than the one we're running in)
+ """
+
+ region_name = self.get_option('region') or 'us-east-1'
+ profile_name = self.get_option('profile') or ''
+ self._vvvv("_get_bucket_endpoint: S3 (global)")
+ tmp_s3_client = self._get_boto_client(
+ 's3', region_name=region_name, profile_name=profile_name,
+ )
+ # Fetch the location of the bucket so we can open a client against the 'right' endpoint
+ # This /should/ always work
+ bucket_location = tmp_s3_client.get_bucket_location(
+ Bucket=(self.get_option('bucket_name')),
+ )
+ bucket_region = bucket_location['LocationConstraint']
+
+ if self.get_option("bucket_endpoint_url"):
+ return self.get_option("bucket_endpoint_url"), bucket_region
+
+ # Create another client for the region the bucket lives in, so we can nab the endpoint URL
+ self._vvvv(f"_get_bucket_endpoint: S3 (bucket region) - {bucket_region}")
+ s3_bucket_client = self._get_boto_client(
+ 's3', region_name=bucket_region, profile_name=profile_name,
+ )
+
+ return s3_bucket_client.meta.endpoint_url, s3_bucket_client.meta.region_name
+
+ def _init_clients(self):
+ self._vvvv("INITIALIZE BOTO3 CLIENTS")
+ profile_name = self.get_option('profile') or ''
+ region_name = self.get_option('region')
+
+ # The SSM Boto client, currently used to initiate and manage the session
+ # Note: does not handle the actual SSM session traffic
+ self._vvvv("SETUP BOTO3 CLIENTS: SSM")
+ ssm_client = self._get_boto_client(
+ 'ssm', region_name=region_name, profile_name=profile_name,
+ )
+ self._client = ssm_client
+
+ s3_endpoint_url, s3_region_name = self._get_bucket_endpoint()
+ self._vvvv(f"SETUP BOTO3 CLIENTS: S3 {s3_endpoint_url}")
+ s3_bucket_client = self._get_boto_client(
+ 's3', region_name=s3_region_name, endpoint_url=s3_endpoint_url, profile_name=profile_name,
+ )
+
+ self._s3_client = s3_bucket_client
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib("boto3"))
+
+ self.host = self._play_context.remote_addr
+
+ if getattr(self._shell, "SHELL_FAMILY", '') == 'powershell':
+ self.delegate = None
+ self.has_native_async = True
+ self.always_pipeline_modules = True
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+ self.protocol = None
+ self.shell_id = None
+ self._shell_type = 'powershell'
+ self.is_windows = True
+
+ def __del__(self):
+ self.close()
+
+ def _connect(self):
+ ''' connect to the host via ssm '''
+
+ self._play_context.remote_user = getpass.getuser()
+
+ if not self._session_id:
+ self.start_session()
+ return self
+
+ def reset(self):
+ ''' start a fresh ssm session '''
+ self._vvvv('reset called on ssm connection')
+ return self.start_session()
+
+ def start_session(self):
+ ''' start ssm session '''
+
+ if self.get_option('instance_id') is None:
+ self.instance_id = self.host
+ else:
+ self.instance_id = self.get_option('instance_id')
+
+ self._vvv(f"ESTABLISH SSM CONNECTION TO: {self.instance_id}")
+
+ executable = self.get_option('plugin')
+ if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+ raise AnsibleError(f"failed to find the executable specified {executable}.")
+
+ self._init_clients()
+
+ self._vvvv(f"START SSM SESSION: {self.instance_id}")
+ start_session_args = dict(Target=self.instance_id, Parameters={})
+ document_name = self.get_option('ssm_document')
+ if document_name is not None:
+ start_session_args['DocumentName'] = document_name
+ response = self._client.start_session(**start_session_args)
+ self._session_id = response['SessionId']
+
+ region_name = self.get_option('region')
+ profile_name = self.get_option('profile') or ''
+ cmd = [
+ executable,
+ json.dumps(response),
+ region_name,
+ "StartSession",
+ profile_name,
+ json.dumps({"Target": self.instance_id}),
+ self._client.meta.endpoint_url,
+ ]
+
+ self._vvvv(f"SSM COMMAND: {to_text(cmd)}")
+
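+ # Give the plugin a pty for stdout so it behaves as if attached to a terminal; the output is then polled line by line below.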
+ stdout_r, stdout_w = pty.openpty()
+ session = subprocess.Popen(
+ cmd,
+ stdin=subprocess.PIPE,
+ stdout=stdout_w,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ bufsize=0,
+ )
+
+ os.close(stdout_w)
+ self._stdout = os.fdopen(stdout_r, 'rb', 0)
+ self._session = session
+ self._poll_stdout = select.poll()
+ self._poll_stdout.register(self._stdout, select.POLLIN)
+
+ # Disable command echo and prompt.
+ self._prepare_terminal()
+
+ self._vvvv(f"SSM CONNECTION ID: {self._session_id}")
+
+ return session
+
+ @_ssm_retry
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the ssm host '''
+
+ super().exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._vvv(f"EXEC: {to_text(cmd)}")
+
+ session = self._session
+
+ mark_begin = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
+ if self.is_windows:
+ mark_start = mark_begin + " $LASTEXITCODE"
+ else:
+ mark_start = mark_begin
+ mark_end = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
+
+ # Wrap command in markers accordingly for the shell used
+ cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)
+
+ self._flush_stderr(session)
+
+ for chunk in chunks(cmd, 1024):
+ session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))
+
+ # Read stdout between the markers
+ stdout = ''
+ win_line = ''
+ begin = False
+ stop_time = int(round(time.time())) + self.get_option('ssm_timeout')
+ while session.poll() is None:
+ remaining = stop_time - int(round(time.time()))
+ if remaining < 1:
+ self._timeout = True
+ self._vvvv(f"EXEC timeout stdout: \n{to_text(stdout)}")
+ raise AnsibleConnectionFailure(
+ f"SSM exec_command timeout on host: {self.instance_id}")
+ if self._poll_stdout.poll(1000):
+ line = self._filter_ansi(self._stdout.readline())
+ self._vvvv(f"EXEC stdout line: \n{to_text(line)}")
+ else:
+ self._vvvv(f"EXEC remaining: {remaining}")
+ continue
+
+ if not begin and self.is_windows:
+ win_line = win_line + line
+ line = win_line
+
+ if mark_start in line:
+ begin = True
+ if not line.startswith(mark_start):
+ stdout = ''
+ continue
+ if begin:
+ if mark_end in line:
+ self._vvvv(f"POST_PROCESS: \n{to_text(stdout)}")
+ returncode, stdout = self._post_process(stdout, mark_begin)
+ self._vvvv(f"POST_PROCESSED: \n{to_text(stdout)}")
+ break
+ stdout = stdout + line
+
+ stderr = self._flush_stderr(session)
+
+ return (returncode, stdout, stderr)
+
+ def _prepare_terminal(self):
+ ''' perform any one-time terminal settings '''
+ # No windows setup for now
+ if self.is_windows:
+ return
+
+ # *_complete variables are 3 valued:
+ # - None: not started
+ # - False: started
+ # - True: complete
+
+ startup_complete = False
+ disable_echo_complete = None
+ disable_echo_cmd = to_bytes("stty -echo\n", errors="surrogate_or_strict")
+
+ disable_prompt_complete = None
+ end_mark = "".join(
+ [random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)]
+ )
+ disable_prompt_cmd = to_bytes(
+ "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n",
+ errors="surrogate_or_strict",
+ )
+ disable_prompt_reply = re.compile(
+ r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE
+ )
+
+ stdout = ""
+ # Custom command execution for when we're waiting for startup
+ stop_time = int(round(time.time())) + self.get_option("ssm_timeout")
+ while (not disable_prompt_complete) and (self._session.poll() is None):
+ remaining = stop_time - int(round(time.time()))
+ if remaining < 1:
+ self._timeout = True
+ self._vvvv(f"PRE timeout stdout: \n{to_bytes(stdout)}")
+ raise AnsibleConnectionFailure(
+ f"SSM start_session timeout on host: {self.instance_id}"
+ )
+ if self._poll_stdout.poll(1000):
+ stdout += to_text(self._stdout.read(1024))
+ self._vvvv(f"PRE stdout line: \n{to_bytes(stdout)}")
+ else:
+ self._vvvv(f"PRE remaining: {remaining}")
+
+ # wait until the session startup message has been received
+ if startup_complete is False:
+ match = str(stdout).find("Starting session with SessionId")
+ if match != -1:
+ self._vvvv("PRE startup output received")
+ startup_complete = True
+
+ # disable echo
+ if startup_complete and (disable_echo_complete is None):
+ self._vvvv(f"PRE Disabling Echo: {disable_echo_cmd}")
+ self._session.stdin.write(disable_echo_cmd)
+ disable_echo_complete = False
+
+ if disable_echo_complete is False:
+ match = str(stdout).find("stty -echo")
+ if match != -1:
+ disable_echo_complete = True
+
+ # disable prompt
+ if disable_echo_complete and disable_prompt_complete is None:
+ self._vvvv(f"PRE Disabling Prompt: \n{disable_prompt_cmd}")
+ self._session.stdin.write(disable_prompt_cmd)
+ disable_prompt_complete = False
+
+ if disable_prompt_complete is False:
+ match = disable_prompt_reply.search(stdout)
+ if match:
+ stdout = stdout[match.end():]
+ disable_prompt_complete = True
+
+ if not disable_prompt_complete:
+ raise AnsibleConnectionFailure(
+ f"SSM process closed during _prepare_terminal on host: {self.instance_id}"
+ )
+ self._vvvv("PRE Terminal configured")
+
+ def _wrap_command(self, cmd, sudoable, mark_start, mark_end):
+ ''' wrap command so stdout and status can be extracted '''
+
+ if self.is_windows:
+ if not cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
+ cmd = self._shell._encode_script(cmd, preserve_rc=True)
+ cmd = cmd + "; echo " + mark_start + "\necho " + mark_end + "\n"
+ else:
+ if sudoable:
+ cmd = "sudo " + cmd
+ cmd = (
+ f"printf '%s\\n' '{mark_start}';\n"
+ f"echo | {cmd};\n"
+ f"printf '\\n%s\\n%s\\n' \"$?\" '{mark_end}';\n"
+ )
+
+ self._vvvv(f"_wrap_command: \n'{to_text(cmd)}'")
+ return cmd
+
+ def _post_process(self, stdout, mark_begin):
+ ''' extract command status and strip unwanted lines '''
+
+ if not self.is_windows:
+ # Get command return code
+ returncode = int(stdout.splitlines()[-2])
+
+ # Throw away final lines
+ for _x in range(0, 3):
+ stdout = stdout[:stdout.rfind('\n')]
+
+ return (returncode, stdout)
+
+ # Windows is a little more complex
+ # Value of $LASTEXITCODE will be the line after the mark
+ trailer = stdout[stdout.rfind(mark_begin):]
+ last_exit_code = trailer.splitlines()[1]
+ if last_exit_code.isdigit():
+ returncode = int(last_exit_code)
+ else:
+ returncode = -1
+ # output to keep will be before the mark
+ stdout = stdout[:stdout.rfind(mark_begin)]
+
+ # If it looks like JSON remove any newlines
+ if stdout.startswith('{'):
+ stdout = stdout.replace('\n', '')
+
+ return (returncode, stdout)
+
+ def _filter_ansi(self, line):
+ ''' remove any ANSI terminal control codes '''
+ line = to_text(line)
+
+ if self.is_windows:
+ osc_filter = re.compile(r'\x1b\][^\x07]*\x07')
+ line = osc_filter.sub('', line)
+ ansi_filter = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
+ line = ansi_filter.sub('', line)
+
+ # Replace or strip sequence (at terminal width)
+ line = line.replace('\r\r\n', '\n')
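+ # A 201-character line is assumed to be a full 200-column terminal row plus a wrap character; drop the extra character.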
+ if len(line) == 201:
+ line = line[:-1]
+
+ return line
+
+ def _flush_stderr(self, session_process):
+ ''' read and return stderr with minimal blocking '''
+
+ poll_stderr = select.poll()
+ poll_stderr.register(session_process.stderr, select.POLLIN)
+ stderr = ''
+
+ while session_process.poll() is None:
+ if not poll_stderr.poll(1):
+ break
+ line = session_process.stderr.readline()
+ self._vvvv(f"stderr line: {to_text(line)}")
+ stderr = stderr + line
+
+ return stderr
+
+ def _get_url(self, client_method, bucket_name, out_path, http_method, extra_args=None):
+ ''' Generate URL for get_object / put_object '''
+
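+ # Presigned URLs let the remote host GET/PUT the object without holding AWS credentials; they expire after an hour.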
+ client = self._s3_client
+ params = {'Bucket': bucket_name, 'Key': out_path}
+ if extra_args is not None:
+ params.update(extra_args)
+ return client.generate_presigned_url(client_method, Params=params, ExpiresIn=3600, HttpMethod=http_method)
+
+ def _get_boto_client(self, service, region_name=None, profile_name=None, endpoint_url=None):
+ ''' Gets a boto3 client based on the STS token '''
+
+ aws_access_key_id = self.get_option('access_key_id')
+ aws_secret_access_key = self.get_option('secret_access_key')
+ aws_session_token = self.get_option('session_token')
+
+ session_args = dict(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ aws_session_token=aws_session_token,
+ region_name=region_name,
+ )
+ if profile_name:
+ session_args['profile_name'] = profile_name
+ session = boto3.session.Session(**session_args)
+
+ client = session.client(
+ service,
+ endpoint_url=endpoint_url,
+ config=Config(
+ signature_version="s3v4",
+ s3={'addressing_style': self.get_option('s3_addressing_style')}
+ )
+ )
+ return client
+
+ def _escape_path(self, path):
+ return path.replace("\\", "/")
+
+ def _generate_encryption_settings(self):
+ put_args = {}
+ put_headers = {}
+ if not self.get_option('bucket_sse_mode'):
+ return put_args, put_headers
+
+ put_args['ServerSideEncryption'] = self.get_option('bucket_sse_mode')
+ put_headers['x-amz-server-side-encryption'] = self.get_option('bucket_sse_mode')
+ if self.get_option('bucket_sse_mode') == 'aws:kms' and self.get_option('bucket_sse_kms_key_id'):
+ put_args['SSEKMSKeyId'] = self.get_option('bucket_sse_kms_key_id')
+ put_headers['x-amz-server-side-encryption-aws-kms-key-id'] = self.get_option('bucket_sse_kms_key_id')
+ return put_args, put_headers
+
+ def _generate_commands(self, bucket_name, s3_path, in_path, out_path):
+ put_args, put_headers = self._generate_encryption_settings()
+
+ put_url = self._get_url('put_object', bucket_name, s3_path, 'PUT', extra_args=put_args)
+ get_url = self._get_url('get_object', bucket_name, s3_path, 'GET')
+
+ if self.is_windows:
+ put_command_headers = "; ".join([f"'{h}' = '{v}'" for h, v in put_headers.items()])
+ put_commands = [
+ (
+ "Invoke-WebRequest -Method PUT "
+ f"-Headers @{{{put_command_headers}}} " # @{'key' = 'value'; 'key2' = 'value2'}
+ f"-InFile '{in_path}' "
+ f"-Uri '{put_url}' "
+ f"-UseBasicParsing"
+ ),
+ ]
+ get_commands = [
+ (
+ "Invoke-WebRequest "
+ f"'{get_url}' "
+ f"-OutFile '{out_path}'"
+ ),
+ ]
+ else:
+ put_command_headers = " ".join([f"-H '{h}: {v}'" for h, v in put_headers.items()])
+ put_commands = [
+ (
+ "curl --request PUT "
+ f"{put_command_headers} "
+ f"--upload-file '{in_path}' "
+ f"'{put_url}'"
+ ),
+ ]
+ get_commands = [
+ (
+ "curl "
+ f"-o '{out_path}' "
+ f"'{get_url}'"
+ ),
+ # Due to https://github.com/curl/curl/issues/183 earlier
+ # versions of curl did not create the output file, when the
+ # response was empty. Although this issue was fixed in 2015,
+ # some actively maintained operating systems still use older
+ # versions of it (e.g. CentOS 7)
+ (
+ "touch "
+ f"'{out_path}'"
+ )
+ ]
+
+ return get_commands, put_commands, put_args
+
+ def _exec_transport_commands(self, in_path, out_path, commands):
+ stdout_combined, stderr_combined = '', ''
+ for command in commands:
+ (returncode, stdout, stderr) = self.exec_command(command, in_data=None, sudoable=False)
+
+ # Check the return code
+ if returncode != 0:
+ raise AnsibleError(
+ f"failed to transfer file to {in_path} {out_path}:\n"
+ f"{stdout}\n{stderr}")
+
+ stdout_combined += stdout
+ stderr_combined += stderr
+
+ return (returncode, stdout_combined, stderr_combined)
+
+ @_ssm_retry
+ def _file_transport_command(self, in_path, out_path, ssm_action):
+ ''' transfer a file to/from host using an intermediate S3 bucket '''
+
+ bucket_name = self.get_option("bucket_name")
+ s3_path = self._escape_path(f"{self.instance_id}/{out_path}")
+
+ get_commands, put_commands, put_args = self._generate_commands(
+ bucket_name, s3_path, in_path, out_path,
+ )
+
+ client = self._s3_client
+
+ try:
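+ # For 'get', the remote host uploads the file to S3 and we download it locally;
+ # for 'put', we upload the file to S3 and the remote host downloads it.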
+ if ssm_action == 'get':
+ (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, put_commands)
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data:
+ client.download_fileobj(bucket_name, s3_path, data)
+ else:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data:
+ client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args)
+ (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, get_commands)
+ return (returncode, stdout, stderr)
+ finally:
+ # Remove the files from the bucket after they've been transferred
+ client.delete_object(Bucket=bucket_name, Key=s3_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super().put_file(in_path, out_path)
+
+ self._vvv(f"PUT {in_path} TO {out_path}")
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(f"file or module does not exist: {in_path}")
+
+ return self._file_transport_command(in_path, out_path, 'put')
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super().fetch_file(in_path, out_path)
+
+ self._vvv(f"FETCH {in_path} TO {out_path}")
+ return self._file_transport_command(in_path, out_path, 'get')
+
+ def close(self):
+ ''' terminate the connection '''
+ if self._session_id:
+
+ self._vvv(f"CLOSING SSM CONNECTION TO: {self.instance_id}")
+ if self._timeout:
+ self._session.terminate()
+ else:
+ cmd = b"\nexit\n"
+ self._session.communicate(cmd)
+
+ self._vvvv(f"TERMINATE SSM SESSION: {self._session_id}")
+ self._client.terminate_session(SessionId=self._session_id)
+ self._session_id = ''
diff --git a/ansible_collections/community/aws/plugins/module_utils/base.py b/ansible_collections/community/aws/plugins/module_utils/base.py
new file mode 100644
index 000000000..1ce732d7a
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/base.py
@@ -0,0 +1,376 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#
+# Note: This code should probably live in amazon.aws rather than community.aws.
+# However, for the sake of getting something into a useful shape first, it makes
+# sense for it to start life in community.aws.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+from functools import wraps
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+class BaseWaiterFactory():
+ """
+ A helper class used for creating additional waiters.
+ Unlike the waiters available directly from botocore these waiters will
+ automatically retry on common (temporary) AWS failures.
+
+ This class should be treated as an abstract class and subclassed before use.
+ A subclass should:
+ - create the necessary client to pass to BaseWaiterFactory.__init__
+ - override BaseWaiterFactory._waiter_model_data to return the data defining
+ the waiter
+
+ Usage:
+ waiter_factory = BaseWaiterFactory(module, client)
+ waiter = waiter_factory.get_waiter('my_waiter_name')
+ waiter.wait(**params)
+ """
+ module = None
+ client = None
+
+ def __init__(self, module, client):
+ self.module = module
+ self.client = client
+ # While it would be nice to supplement this with the upstream data,
+ # unfortunately the client doesn't have a public method for getting the
+ # waiter configs.
+ data = self._inject_ratelimit_retries(self._waiter_model_data)
+ self._model = botocore.waiter.WaiterModel(
+ waiter_config=dict(version=2, waiters=data),
+ )
+
+ @property
+ def _waiter_model_data(self):
+ r"""
+ Subclasses should override this method to return a dictionary mapping
+ waiter names to the waiter definition.
+
+ This data is similar to the data found in botocore's waiters-2.json
+ files (for example: botocore/botocore/data/ec2/2016-11-15/waiters-2.json)
+ with two differences:
+ 1) Waiter names do not have transformations applied during lookup
+ 2) Only the 'waiters' data is required, the data is assumed to be
+ version 2
+
+ for example:
+
+ @property
+ def _waiter_model_data(self):
+ return dict(
+ tgw_attachment_deleted=dict(
+ operation='DescribeTransitGatewayAttachments',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='retry', matcher='pathAll', expected='deleting', argument='TransitGatewayAttachments[].State'),
+ dict(state='success', matcher='pathAll', expected='deleted', argument='TransitGatewayAttachments[].State'),
+ dict(state='success', matcher='path', expected=True, argument='length(TransitGatewayAttachments[]) == `0`'),
+ dict(state='success', matcher='error', expected='InvalidRouteTableID.NotFound'),
+ ]
+ ),
+ )
+
+ or
+
+ @property
+ def _waiter_model_data(self):
+ return {
+ "instance_exists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInstances",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": true,
+ "argument": "length(Reservations[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInstanceID.NotFound",
+ "state": "retry"
+ }
+ ]
+ },
+ }
+ """
+
+ return dict()
+
+ def _inject_ratelimit_retries(self, model):
+ extra_retries = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling']
+
+ acceptors = []
+ for error in extra_retries:
+ acceptors.append(dict(state="retry", matcher="error", expected=error))
+
+ _model = deepcopy(model)
+ for waiter in _model:
+ _model[waiter]["acceptors"].extend(acceptors)
+
+ return _model
+
+ def get_waiter(self, waiter_name):
+ waiters = self._model.waiter_names
+ if waiter_name not in waiters:
+ self.module.fail_json(
+ 'Unable to find waiter {0}. Available_waiters: {1}'
+ .format(waiter_name, waiters))
+ return botocore.waiter.create_waiter_with_client(
+ waiter_name, self._model, self.client,
+ )
+
+
+class Boto3Mixin():
+ @staticmethod
+ def aws_error_handler(description):
+ r"""
+ A simple wrapper that handles the usual botocore exceptions and exits
+ with module.fail_json_aws. Designed to be used with BaseResourceManager.
+ Assumptions:
+ 1) First argument (usually `self` of method being wrapped will have a
+ 'module' attribute which is an AnsibleAWSModule
+ 2) First argument of method being wrapped will have an
+ _extra_error_output() method which takes no arguments and returns a
+ dictionary of extra parameters to be returned in the event of a
+ botocore exception.
+ Parameters:
+ description (string): In the event of a botocore exception the error
+ message will be 'Failed to {DESCRIPTION}'.
+
+ Example Usage:
+ class ExampleClass(Boto3Mixin):
+ def __init__(self, module)
+ self.module = module
+ self._get_client()
+
+ @Boto3Mixin.aws_error_handler("connect to AWS")
+ def _get_client(self):
+ self.client = self.module.client('ec2')
+
+ @Boto3Mixin.aws_error_handler("describe EC2 instances")
+ def _do_something(**params):
+ return self.client.describe_instances(**params)
+ """
+
+ def wrapper(func):
+ @wraps(func)
+ def handler(_self, *args, **kwargs):
+ extra_output = _self._extra_error_output()
+ try:
+ return func(_self, *args, **kwargs)
+ except (botocore.exceptions.WaiterError) as e:
+ _self.module.fail_json_aws(e, msg='Failed waiting for {DESC}'.format(DESC=description), **extra_output)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ _self.module.fail_json_aws(e, msg='Failed to {DESC}'.format(DESC=description), **extra_output)
+ return handler
+ return wrapper
+
+ def _normalize_boto3_resource(self, resource, add_tags=False):
+ r"""
+ Performs common boto3 resource to Ansible resource conversion.
+ `resource['Tags']` will by default be converted from the boto3 tag list
+ format to a simple dictionary.
+ Parameters:
+ resource (dict): The boto3 style resource to convert to the normal Ansible
+ format (snake_case).
+ add_tags (bool): When `true`, if a resource does not have 'Tags' property
+ the returned resource will have tags set to an empty
+ dictionary.
+ """
+ if resource is None:
+ return None
+
+ tags = resource.get('Tags', None)
+ if tags:
+ tags = boto3_tag_list_to_ansible_dict(tags)
+ elif add_tags or tags is not None:
+ tags = {}
+
+ normalized_resource = camel_dict_to_snake_dict(resource)
+ if tags is not None:
+ normalized_resource['tags'] = tags
+ return normalized_resource
+
+ def _extra_error_output(self):
+ # In the event of an error it can be helpful to output things like the
+ # 'name'/'arn' of a resource.
+ return dict()
+
+
+class BaseResourceManager(Boto3Mixin):
+ def __init__(self, module):
+ r"""
+ Parameters:
+ module (AnsibleAWSModule): An Ansible module.
+ """
+ self.module = module
+ self.changed = False
+ self.original_resource = dict()
+ self.updated_resource = dict()
+ self._resource_updates = dict()
+ self._preupdate_resource = dict()
+ self._wait = True
+ self._wait_timeout = None
+ super(BaseResourceManager, self).__init__()
+
+ def _merge_resource_changes(self, filter_immutable=True, creation=False):
+ """
+ Merges the contents of the 'pre_update' resource and metadata variables
+ with the pending updates
+ """
+ resource = deepcopy(self._preupdate_resource)
+ resource.update(self._resource_updates)
+
+ if filter_immutable:
+ resource = self._filter_immutable_resource_attributes(resource)
+
+ return resource
+
+ def _filter_immutable_resource_attributes(self, resource):
+ return deepcopy(resource)
+
+ def _do_creation_wait(self, **params):
+ pass
+
+ def _do_deletion_wait(self, **params):
+ pass
+
+ def _do_update_wait(self, **params):
+ pass
+
+ @property
+ def _waiter_config(self):
+ params = dict()
+ if self._wait_timeout:
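+ # Poll at (at most) 5-second intervals so that delay * max_attempts roughly equals the requested timeout.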
+ delay = min(5, self._wait_timeout)
+ max_attempts = (self._wait_timeout // delay)
+ config = dict(Delay=delay, MaxAttempts=max_attempts)
+ params['WaiterConfig'] = config
+ return params
+
+ def _wait_for_deletion(self):
+ if not self._wait:
+ return
+ params = self._waiter_config
+ self._do_deletion_wait(**params)
+
+ def _wait_for_creation(self):
+ if not self._wait:
+ return
+ params = self._waiter_config
+ self._do_creation_wait(**params)
+
+ def _wait_for_update(self):
+ if not self._wait:
+ return
+ params = self._waiter_config
+ self._do_update_wait(**params)
+
+ def _generate_updated_resource(self):
+ """
+ Merges all pending changes into self.updated_resource
+ Used during check mode where it's not possible to get and
+ refresh the resource
+ """
+ return self._merge_resource_changes(filter_immutable=False)
+
+ # If you override _flush_create you're responsible for handling check_mode
+ # If you override _do_create_resource you'll only be called if check_mode == False
+ def _flush_create(self):
+ changed = True
+
+ if not self.module.check_mode:
+ changed = self._do_create_resource()
+ self._wait_for_creation()
+ self.updated_resource = self.get_resource()
+ else: # (CHECK MODE)
+ self.updated_resource = self._normalize_resource(self._generate_updated_resource())
+
+ self._resource_updates = dict()
+ self.changed = changed
+ return True
+
+ def _check_updates_pending(self):
+ if self._resource_updates:
+ return True
+ return False
+
+ # If you override _flush_update you're responsible for handling check_mode
+ # If you override _do_update_resource you'll only be called if there are
+ # updated pending and check_mode == False
+ def _flush_update(self):
+ if not self._check_updates_pending():
+ self.updated_resource = self.original_resource
+ return False
+
+ if not self.module.check_mode:
+ self._do_update_resource()
+ response = self._wait_for_update()
+ self.updated_resource = self.get_resource()
+ else: # (CHECK_MODE)
+ self.updated_resource = self._normalize_resource(self._generate_updated_resource())
+
+ self._resource_updates = dict()
+ return True
+
+ def flush_changes(self):
+ if self.original_resource:
+ return self._flush_update()
+ else:
+ return self._flush_create()
+
+ def _set_resource_value(self, key, value, description=None, immutable=False):
+ if value is None:
+ return False
+ if value == self._get_resource_value(key):
+ return False
+ if immutable and self.original_resource:
+ if description is None:
+ description = key
+ self.module.fail_json(msg='{0} can not be updated after creation'
+ .format(description))
+ self._resource_updates[key] = value
+ self.changed = True
+ return True
+
+ def _get_resource_value(self, key, default=None):
+ default_value = self._preupdate_resource.get(key, default)
+ return self._resource_updates.get(key, default_value)
+
+ def set_wait(self, wait):
+ if wait is None:
+ return False
+ if wait == self._wait:
+ return False
+
+ self._wait = wait
+ return True
+
+ def set_wait_timeout(self, timeout):
+ if timeout is None:
+ return False
+ if timeout == self._wait_timeout:
+ return False
+
+ self._wait_timeout = timeout
+ return True
diff --git a/ansible_collections/community/aws/plugins/module_utils/ec2.py b/ansible_collections/community/aws/plugins/module_utils/ec2.py
new file mode 100644
index 000000000..5ae789857
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/ec2.py
@@ -0,0 +1,189 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+from ansible_collections.community.aws.plugins.module_utils.base import BaseResourceManager
+from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
+from ansible_collections.community.aws.plugins.module_utils.base import Boto3Mixin
+
+
+class Ec2WaiterFactory(BaseWaiterFactory):
+ def __init__(self, module):
+ # the AWSRetry wrapper doesn't support the wait functions (there's no
+ # public call we can cleanly wrap)
+ client = module.client('ec2')
+ super(Ec2WaiterFactory, self).__init__(module, client)
+
+ @property
+ def _waiter_model_data(self):
+ data = super(Ec2WaiterFactory, self)._waiter_model_data
+ return data
+
+
+class Ec2Boto3Mixin(Boto3Mixin):
+
+ @AWSRetry.jittered_backoff()
+ def _paginated_describe_subnets(self, **params):
+ paginator = self.client.get_paginator('describe_subnets')
+ return paginator.paginate(**params).build_full_result()
+
+ @Boto3Mixin.aws_error_handler('describe subnets')
+ def _describe_subnets(self, **params):
+ try:
+ result = self._paginated_describe_subnets(**params)
+        except is_boto3_error_code('InvalidSubnetID.NotFound'):
+ return None
+ return result.get('Subnets', None)
+
+
+class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager):
+
+ resource_id = None
+ TAG_RESOURCE_TYPE = None
+ # This can be overridden by a subclass *if* 'Tags' isn't returned as a part of
+ # the standard Resource description
+ TAGS_ON_RESOURCE = True
+ # If the resource supports using "TagSpecifications" on creation we can
+ TAGS_ON_CREATE = 'TagSpecifications'
+
+ def __init__(self, module, id=None):
+ r"""
+ Parameters:
+ module (AnsibleAWSModule): An Ansible module.
+ """
+ super(BaseEc2Manager, self).__init__(module)
+ self.client = self._create_client()
+ self._tagging_updates = dict()
+ self.resource_id = id
+
+        # If an ID was provided, look up the existing resource so that we have
+        # a baseline to compare updates against.
+ if self.resource_id:
+ resource = deepcopy(self.get_resource())
+ self.original_resource = resource
+
+ def _flush_update(self):
+ changed = False
+ changed |= self._do_tagging()
+ changed |= super(BaseEc2Manager, self)._flush_update()
+ return changed
+
+ @Boto3Mixin.aws_error_handler('connect to AWS')
+ def _create_client(self, client_name='ec2'):
+ client = self.module.client(client_name, retry_decorator=AWSRetry.jittered_backoff())
+ return client
+
+ @Boto3Mixin.aws_error_handler('set tags on resource')
+ def _add_tags(self, **params):
+ self.client.create_tags(aws_retry=True, **params)
+ return True
+
+ @Boto3Mixin.aws_error_handler('unset tags on resource')
+ def _remove_tags(self, **params):
+ self.client.delete_tags(aws_retry=True, **params)
+ return True
+
+ @AWSRetry.jittered_backoff()
+ def _paginated_describe_tags(self, **params):
+ paginator = self.client.get_paginator('describe_tags')
+ return paginator.paginate(**params).build_full_result()
+
+ @Boto3Mixin.aws_error_handler('list tags on resource')
+ def _describe_tags(self, id=None):
+ if not id:
+ id = self.resource_id
+ filters = ansible_dict_to_boto3_filter_list({"resource-id": id})
+ tags = self._paginated_describe_tags(Filters=filters)
+ return tags
+
+ def _get_tags(self, id=None):
+ if id is None:
+ id = self.resource_id
+ # If the Tags are available from the resource, then use them
+ if self.TAGS_ON_RESOURCE:
+ tags = self._preupdate_resource.get('Tags', [])
+ # Otherwise we'll have to look them up
+ else:
+ tags = self._describe_tags(id=id)
+ return boto3_tag_list_to_ansible_dict(tags)
+
+ def _do_tagging(self):
+ changed = False
+ tags_to_add = self._tagging_updates.get('add')
+ tags_to_remove = self._tagging_updates.get('remove')
+
+ if tags_to_add:
+ changed = True
+ tags = ansible_dict_to_boto3_tag_list(tags_to_add)
+ if not self.module.check_mode:
+ self._add_tags(Resources=[self.resource_id], Tags=tags)
+ if tags_to_remove:
+ changed = True
+ if not self.module.check_mode:
+ tag_list = [dict(Key=tagkey) for tagkey in tags_to_remove]
+ self._remove_tags(Resources=[self.resource_id], Tags=tag_list)
+
+ return changed
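+
+    # The pending tagging updates take a simple shape, e.g. (illustrative):
+    #   self._tagging_updates = {'add': {'Name': 'example'}, 'remove': ['OldKey']}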
+
+ def _merge_resource_changes(self, filter_immutable=True, creation=False):
+
+ resource = super(BaseEc2Manager, self)._merge_resource_changes(
+ filter_immutable=filter_immutable,
+ creation=creation
+ )
+
+ if creation:
+ if not self.TAGS_ON_CREATE:
+ resource.pop('Tags', None)
+ elif self.TAGS_ON_CREATE == 'TagSpecifications':
+ tags = boto3_tag_list_to_ansible_dict(resource.pop('Tags', []))
+ tag_specs = boto3_tag_specifications(tags, types=[self.TAG_RESOURCE_TYPE])
+ if tag_specs:
+ resource['TagSpecifications'] = tag_specs
+
+ return resource
+
+ def set_tags(self, tags, purge_tags):
+
+ if tags is None:
+ return False
+ changed = False
+
+ # Tags are returned as a part of the resource, but have to be updated
+ # via dedicated tagging methods
+ current_tags = self._get_tags()
+
+ # So that diff works in check mode we need to know the full target state
+ if purge_tags:
+ desired_tags = deepcopy(tags)
+ else:
+ desired_tags = deepcopy(current_tags)
+ desired_tags.update(tags)
+
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if tags_to_add:
+ self._tagging_updates['add'] = tags_to_add
+ changed = True
+ if tags_to_remove:
+ self._tagging_updates['remove'] = tags_to_remove
+ changed = True
+
+ if changed:
+            # Tags are stored as a list, but treated like a dict; the
+            # simplistic '==' in _set_resource_value doesn't do the comparison
+            # properly
+ return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags))
+
+ return False
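+
+
+# Example usage, as a hypothetical sketch (the subnet ID and tags are
+# illustrative; concrete subclasses normally set TAG_RESOURCE_TYPE):
+#   manager = BaseEc2Manager(module, id='subnet-0123456789abcdef0')
+#   manager.set_tags({'Name': 'example'}, purge_tags=True)
+#   changed = manager.flush_changes()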
diff --git a/ansible_collections/community/aws/plugins/module_utils/etag.py b/ansible_collections/community/aws/plugins/module_utils/etag.py
new file mode 100644
index 000000000..a8cab5082
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/etag.py
@@ -0,0 +1,62 @@
+# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
+#
+# calculate_multipart_etag Copyright (C) 2015
+# Tony Lastowka <tlastowka at gmail dot com>
+# https://github.com/tlastowka
+#
+#
+# calculate_multipart_etag is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# calculate_multipart_etag is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
+
+import hashlib
+
+try:
+ from boto3.s3.transfer import TransferConfig
+ DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize
+except ImportError:
+ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
+ pass # Handled by AnsibleAWSModule
+
+
+def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
+ """
+    Calculates a multipart upload ETag for Amazon S3.
+
+ Arguments:
+
+    source_path -- The path of the file to calculate the ETag for
+    chunk_size -- The multipart chunk size that was used for the upload
+ """
+
+ md5s = []
+
+ with open(source_path, 'rb') as fp:
+ while True:
+
+ data = fp.read(chunk_size)
+
+ if not data:
+ break
+ md5 = hashlib.new('md5', usedforsecurity=False)
+ md5.update(data)
+ md5s.append(md5)
+
+ if len(md5s) == 1:
+ new_etag = '"{0}"'.format(md5s[0].hexdigest())
+ else: # > 1
+ digests = b"".join(m.digest() for m in md5s)
+
+ new_md5 = hashlib.md5(digests)
+ new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s))
+
+ return new_etag
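+
+
+# Example, as a hypothetical sketch (the path and the comparison target are
+# illustrative):
+#   etag = calculate_multipart_etag('/tmp/archive.tar.gz')
+#   changed = (etag != remote_object['ETag'])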
diff --git a/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py b/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py
new file mode 100644
index 000000000..920c9f092
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/networkfirewall.py
@@ -0,0 +1,1762 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+import time
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.base import Boto3Mixin
+from ansible_collections.community.aws.plugins.module_utils.base import BaseResourceManager
+from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
+
+from ansible_collections.community.aws.plugins.module_utils.ec2 import BaseEc2Manager
+
+
+def _merge_set(current, new, purge):
+ _current = set(current)
+ _new = set(new)
+ if purge:
+ final = _new
+ else:
+ final = _new | _current
+
+ return final
+
+
+def _merge_dict(current, new, purge):
+ _current = deepcopy(current)
+ if purge:
+ final = dict()
+ else:
+ final = _current
+ final.update(new)
+
+ return final
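+
+# Illustrative behaviour of the merge helpers above:
+#   _merge_set({'a'}, {'b'}, purge=False) -> {'a', 'b'}
+#   _merge_set({'a'}, {'b'}, purge=True)  -> {'b'}
+#   _merge_dict({'a': 1}, {'b': 2}, purge=False) -> {'a': 1, 'b': 2}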
+
+
+def _string_list(value):
+ if isinstance(value, string_types):
+ value = [value]
+ elif isinstance(value, bool):
+ value = [to_text(value).lower()]
+ elif isinstance(value, list):
+ value = [to_text(v) for v in value]
+ else:
+ value = [to_text(value)]
+ return value
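+
+
+# Illustrative behaviour:
+#   _string_list('HOME_NET')  -> ['HOME_NET']
+#   _string_list(True)        -> ['true']
+#   _string_list([80, 443])   -> ['80', '443']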
+
+
+class NetworkFirewallWaiterFactory(BaseWaiterFactory):
+ def __init__(self, module):
+ # the AWSRetry wrapper doesn't support the wait functions (there's no
+ # public call we can cleanly wrap)
+ client = module.client('network-firewall')
+ super(NetworkFirewallWaiterFactory, self).__init__(module, client)
+
+ @property
+ def _waiter_model_data(self):
+ data = super(NetworkFirewallWaiterFactory, self)._waiter_model_data
+ nw_data = dict(
+ rule_group_active=dict(
+ operation='DescribeRuleGroup',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='failure', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'),
+ dict(state='success', matcher='path', expected='ACTIVE', argument='RuleGroupResponse.RuleGroupStatus'),
+ ]
+ ),
+ rule_group_deleted=dict(
+ operation='DescribeRuleGroup',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='retry', matcher='path', expected='DELETING', argument='RuleGroupResponse.RuleGroupStatus'),
+ dict(state='success', matcher='error', expected='ResourceNotFoundException'),
+ ]
+ ),
+ policy_active=dict(
+ operation='DescribeFirewallPolicy',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='failure', matcher='path', expected='DELETING', argument='FirewallPolicyResponse.FirewallPolicyStatus'),
+ dict(state='success', matcher='path', expected='ACTIVE', argument='FirewallPolicyResponse.FirewallPolicyStatus'),
+ ]
+ ),
+ policy_deleted=dict(
+ operation='DescribeFirewallPolicy',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='retry', matcher='path', expected='DELETING', argument='FirewallPolicyResponse.FirewallPolicyStatus'),
+ dict(state='success', matcher='error', expected='ResourceNotFoundException'),
+ ]
+ ),
+ firewall_active=dict(
+ operation='DescribeFirewall',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'),
+ dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'),
+ dict(state='success', matcher='path', expected='READY', argument='FirewallStatus.Status'),
+ ]
+ ),
+ firewall_updated=dict(
+ operation='DescribeFirewall',
+ delay=5, maxAttempts=240,
+ acceptors=[
+ dict(state='failure', matcher='path', expected='DELETING', argument='FirewallStatus.Status'),
+ dict(state='retry', matcher='path', expected='PROVISIONING', argument='FirewallStatus.Status'),
+ dict(state='retry', matcher='path', expected='PENDING', argument='FirewallStatus.ConfigurationSyncStateSummary'),
+ dict(state='success', matcher='path', expected='IN_SYNC', argument='FirewallStatus.ConfigurationSyncStateSummary'),
+ ]
+ ),
+ firewall_deleted=dict(
+ operation='DescribeFirewall',
+ delay=5, maxAttempts=240,
+ acceptors=[
+ dict(state='retry', matcher='path', expected='DELETING', argument='FirewallStatus.Status'),
+ dict(state='success', matcher='error', expected='ResourceNotFoundException'),
+ ]
+ ),
+ )
+ data.update(nw_data)
+ return data
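+
+    # Hypothetical usage sketch of the waiters defined above:
+    #   wf = NetworkFirewallWaiterFactory(module)
+    #   wf.get_waiter('firewall_active').wait(FirewallName='example-firewall')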
+
+
+class NetworkFirewallBoto3Mixin(Boto3Mixin):
+ def __init__(self, module):
+ r"""
+ Parameters:
+ module (AnsibleAWSModule): An Ansible module.
+ """
+ self.nf_waiter_factory = NetworkFirewallWaiterFactory(module)
+ super(NetworkFirewallBoto3Mixin, self).__init__(module)
+ self._update_token = None
+
+
+class NFRuleGroupBoto3Mixin(NetworkFirewallBoto3Mixin):
+ # Paginators can't be (easily) wrapped, so we wrap this method with the
+ # retry - retries the full fetch, but better than simply giving up.
+ @AWSRetry.jittered_backoff()
+ def _paginated_list_rule_groups(self, **params):
+ paginator = self.client.get_paginator('list_rule_groups')
+ result = paginator.paginate(**params).build_full_result()
+ return result.get('RuleGroups', None)
+
+ @Boto3Mixin.aws_error_handler('list all rule groups')
+ def _list_rule_groups(self, **params):
+ return self._paginated_list_rule_groups(**params)
+
+ @Boto3Mixin.aws_error_handler('describe rule group')
+ def _describe_rule_group(self, **params):
+ try:
+ result = self.client.describe_rule_group(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ rule_group = result.get('RuleGroup', None)
+ metadata = result.get('RuleGroupResponse', None)
+ return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata)
+
+ @Boto3Mixin.aws_error_handler('create rule group')
+ def _create_rule_group(self, **params):
+ result = self.client.create_rule_group(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('RuleGroupResponse', None)
+
+ @Boto3Mixin.aws_error_handler('update rule group')
+ def _update_rule_group(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_rule_group(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('RuleGroupResponse', None)
+
+ @Boto3Mixin.aws_error_handler('delete rule group')
+ def _delete_rule_group(self, **params):
+ try:
+ result = self.client.delete_rule_group(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ return result.get('RuleGroupResponse', None)
+
+    @Boto3Mixin.aws_error_handler('rule group to finish deleting')
+ def _wait_rule_group_deleted(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('rule_group_deleted')
+ waiter.wait(**params)
+
+    @Boto3Mixin.aws_error_handler('rule group to become active')
+ def _wait_rule_group_active(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('rule_group_active')
+ waiter.wait(**params)
+
+
+class NFPolicyBoto3Mixin(NetworkFirewallBoto3Mixin):
+ # Paginators can't be (easily) wrapped, so we wrap this method with the
+ # retry - retries the full fetch, but better than simply giving up.
+ @AWSRetry.jittered_backoff()
+ def _paginated_list_policies(self, **params):
+ paginator = self.client.get_paginator('list_firewall_policies')
+ result = paginator.paginate(**params).build_full_result()
+ return result.get('FirewallPolicies', None)
+
+ @Boto3Mixin.aws_error_handler('list all firewall policies')
+ def _list_policies(self, **params):
+ return self._paginated_list_policies(**params)
+
+ @Boto3Mixin.aws_error_handler('describe firewall policy')
+ def _describe_policy(self, **params):
+ try:
+ result = self.client.describe_firewall_policy(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ policy = result.get('FirewallPolicy', None)
+ metadata = result.get('FirewallPolicyResponse', None)
+ return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata)
+
+ @Boto3Mixin.aws_error_handler('create firewall policy')
+ def _create_policy(self, **params):
+ result = self.client.create_firewall_policy(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallPolicyResponse', None)
+
+ @Boto3Mixin.aws_error_handler('update firewall policy')
+ def _update_policy(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_firewall_policy(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallPolicyResponse', None)
+
+ @Boto3Mixin.aws_error_handler('delete firewall policy')
+ def _delete_policy(self, **params):
+ try:
+ result = self.client.delete_firewall_policy(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ return result.get('FirewallPolicyResponse', None)
+
+ @Boto3Mixin.aws_error_handler('firewall policy to finish deleting')
+ def _wait_policy_deleted(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('policy_deleted')
+ waiter.wait(**params)
+
+ @Boto3Mixin.aws_error_handler('firewall policy to become active')
+ def _wait_policy_active(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('policy_active')
+ waiter.wait(**params)
+
+
+class NFFirewallBoto3Mixin(NetworkFirewallBoto3Mixin):
+ # Paginators can't be (easily) wrapped, so we wrap this method with the
+ # retry - retries the full fetch, but better than simply giving up.
+ @AWSRetry.jittered_backoff()
+ def _paginated_list_firewalls(self, **params):
+ paginator = self.client.get_paginator('list_firewalls')
+ result = paginator.paginate(**params).build_full_result()
+ return result.get('Firewalls', None)
+
+ @Boto3Mixin.aws_error_handler('list all firewalls')
+ def _list_firewalls(self, **params):
+ return self._paginated_list_firewalls(**params)
+
+ @Boto3Mixin.aws_error_handler('describe firewall')
+ def _describe_firewall(self, **params):
+ try:
+ result = self.client.describe_firewall(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ firewall = result.get('Firewall', None)
+ metadata = result.get('FirewallStatus', None)
+ return dict(Firewall=firewall, FirewallMetadata=metadata)
+
+ @Boto3Mixin.aws_error_handler('create firewall')
+ def _create_firewall(self, **params):
+ result = self.client.create_firewall(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallStatus', None)
+
+ @Boto3Mixin.aws_error_handler('update firewall description')
+ def _update_firewall_description(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_firewall_description(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('update firewall subnet change protection')
+ def _update_subnet_change_protection(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_subnet_change_protection(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('update firewall policy change protection')
+ def _update_firewall_policy_change_protection(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_firewall_policy_change_protection(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('update firewall deletion protection')
+ def _update_firewall_delete_protection(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.update_firewall_delete_protection(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('associate policy with firewall')
+ def _associate_firewall_policy(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.associate_firewall_policy(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('associate subnets with firewall')
+ def _associate_subnets(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.associate_subnets(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('disassociate subnets from firewall')
+ def _disassociate_subnets(self, **params):
+ if self._update_token and 'UpdateToken' not in params:
+ params['UpdateToken'] = self._update_token
+ result = self.client.disassociate_subnets(aws_retry=True, **params)
+
+ update_token = result.get('UpdateToken', None)
+ if update_token:
+ self._update_token = update_token
+ return result.get('FirewallName', None)
+
+ @Boto3Mixin.aws_error_handler('delete firewall')
+ def _delete_firewall(self, **params):
+ try:
+ result = self.client.delete_firewall(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+ return result.get('FirewallStatus', None)
+
+ @Boto3Mixin.aws_error_handler('firewall to finish deleting')
+ def _wait_firewall_deleted(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('firewall_deleted')
+ waiter.wait(**params)
+
+ @Boto3Mixin.aws_error_handler('firewall to finish updating')
+ def _wait_firewall_updated(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('firewall_updated')
+ waiter.wait(**params)
+
+ @Boto3Mixin.aws_error_handler('firewall to become active')
+ def _wait_firewall_active(self, **params):
+ waiter = self.nf_waiter_factory.get_waiter('firewall_active')
+ waiter.wait(**params)
+
+
+class BaseNetworkFirewallManager(BaseResourceManager):
+ def __init__(self, module):
+ r"""
+ Parameters:
+ module (AnsibleAWSModule): An Ansible module.
+ """
+ super().__init__(module)
+
+ self.client = self._create_client()
+
+ # Network Firewall returns a token when you perform create/get/update
+ # actions
+ self._preupdate_metadata = dict()
+ self._metadata_updates = dict()
+ self._tagging_updates = dict()
+
+ @Boto3Mixin.aws_error_handler('connect to AWS')
+ def _create_client(self, client_name='network-firewall'):
+ client = self.module.client(client_name, retry_decorator=AWSRetry.jittered_backoff())
+ return client
+
+ def _get_id_params(self):
+ return dict()
+
+ def _check_updates_pending(self):
+ if self._metadata_updates:
+ return True
+ return super(BaseNetworkFirewallManager, self)._check_updates_pending()
+
+ def _merge_metadata_changes(self, filter_immutable=True):
+ """
+ Merges the contents of the 'pre_update' metadata variables
+ with the pending updates
+ """
+ metadata = deepcopy(self._preupdate_metadata)
+ metadata.update(self._metadata_updates)
+
+ if filter_immutable:
+ metadata = self._filter_immutable_metadata_attributes(metadata)
+
+ return metadata
+
+ def _merge_changes(self, filter_metadata=True):
+ """
+ Merges the contents of the 'pre_update' resource and metadata variables
+ with the pending updates
+ """
+ metadata = self._merge_metadata_changes(filter_metadata)
+ resource = self._merge_resource_changes()
+ return metadata, resource
+
+ def _filter_immutable_metadata_attributes(self, metadata):
+ """
+ Removes information from the metadata which can't be updated.
+ Returns a *copy* of the metadata dictionary.
+ """
+ return deepcopy(metadata)
+
+ def _flush_create(self):
+ changed = super(BaseNetworkFirewallManager, self)._flush_create()
+ self._metadata_updates = dict()
+ return changed
+
+ def _flush_update(self):
+ changed = super(BaseNetworkFirewallManager, self)._flush_update()
+ self._metadata_updates = dict()
+ return changed
+
+ @BaseResourceManager.aws_error_handler('set tags on resource')
+ def _add_tags(self, **params):
+ self.client.tag_resource(aws_retry=True, **params)
+ return True
+
+ @BaseResourceManager.aws_error_handler('unset tags on resource')
+ def _remove_tags(self, **params):
+ self.client.untag_resource(aws_retry=True, **params)
+ return True
+
+ def _get_preupdate_arn(self):
+ return self._preupdate_metadata.get('Arn')
+
+ def _set_metadata_value(self, key, value, description=None, immutable=False):
+ if value is None:
+ return False
+ if value == self._get_metadata_value(key):
+ return False
+ if immutable and self.original_resource:
+ if description is None:
+ description = key
+            self.module.fail_json(msg='{0} cannot be updated after creation'
+ .format(description))
+ self._metadata_updates[key] = value
+ self.changed = True
+ return True
+
+ def _get_metadata_value(self, key, default=None):
+ return self._metadata_updates.get(key, self._preupdate_metadata.get(key, default))
+
+ def _set_tag_values(self, desired_tags):
+ return self._set_metadata_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags))
+
+ def _get_tag_values(self):
+ return self._get_metadata_value('Tags', [])
+
+ def _flush_tagging(self):
+ changed = False
+ tags_to_add = self._tagging_updates.get('add')
+ tags_to_remove = self._tagging_updates.get('remove')
+
+ resource_arn = self._get_preupdate_arn()
+ if not resource_arn:
+ return False
+
+ if tags_to_add:
+ changed = True
+ tags = ansible_dict_to_boto3_tag_list(tags_to_add)
+ if not self.module.check_mode:
+ self._add_tags(ResourceArn=resource_arn, Tags=tags)
+ if tags_to_remove:
+ changed = True
+ if not self.module.check_mode:
+ self._remove_tags(ResourceArn=resource_arn, TagKeys=tags_to_remove)
+
+ return changed
+
+ def set_tags(self, tags, purge_tags):
+
+ if tags is None:
+ return False
+ changed = False
+
+ # Tags are returned as a part of the metadata, but have to be updated
+ # via dedicated tagging methods
+ current_tags = boto3_tag_list_to_ansible_dict(self._get_tag_values())
+
+ # So that diff works in check mode we need to know the full target state
+ if purge_tags:
+ desired_tags = deepcopy(tags)
+ else:
+ desired_tags = deepcopy(current_tags)
+ desired_tags.update(tags)
+
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if tags_to_add:
+ self._tagging_updates['add'] = tags_to_add
+ changed = True
+ if tags_to_remove:
+ self._tagging_updates['remove'] = tags_to_remove
+ changed = True
+
+ if changed:
+            # Tags are stored as a list, but treated like a dict; the
+            # simplistic '==' in _set_metadata_value doesn't do the comparison
+            # properly
+ return self._set_tag_values(desired_tags)
+
+ return False
+
+
+class NetworkFirewallRuleManager(NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager):
+
+ RULE_TYPES = frozenset(['StatelessRulesAndCustomActions', 'StatefulRules',
+ 'RulesSourceList', 'RulesString'])
+
+ name = None
+ rule_type = None
+ arn = None
+
+ def __init__(self, module, name=None, rule_type=None, arn=None):
+ super().__init__(module)
+ # Name parameter is unique (by region) and can not be modified.
+ self.name = name
+ self.rule_type = rule_type
+ self.arn = arn
+ if self.name or self.arn:
+ rule_group = deepcopy(self.get_rule_group())
+ self.original_resource = rule_group
+
+ def _extra_error_output(self):
+ output = super(NetworkFirewallRuleManager, self)._extra_error_output()
+ if self.name:
+ output['RuleGroupName'] = self.name
+ if self.rule_type:
+ output['Type'] = self.rule_type
+ if self.arn:
+ output['RuleGroupArn'] = self.arn
+ return output
+
+ def _filter_immutable_metadata_attributes(self, metadata):
+ metadata = super(NetworkFirewallRuleManager, self)._filter_immutable_metadata_attributes(metadata)
+ metadata.pop('RuleGroupArn', None)
+ metadata.pop('RuleGroupName', None)
+ metadata.pop('RuleGroupId', None)
+ metadata.pop('Type', None)
+ metadata.pop('Capacity', None)
+ metadata.pop('RuleGroupStatus', None)
+ metadata.pop('Tags', None)
+ metadata.pop('ConsumedCapacity', None)
+ metadata.pop('NumberOfAssociations', None)
+ return metadata
+
+ def _get_preupdate_arn(self):
+ return self._get_metadata_value('RuleGroupArn')
+
+ def _get_id_params(self, name=None, rule_type=None, arn=None):
+ if arn:
+ return dict(RuleGroupArn=arn)
+ if self.arn:
+ return dict(RuleGroupArn=self.arn)
+ if not name:
+ name = self.name
+ if not rule_type:
+ rule_type = self.rule_type
+ if rule_type:
+ rule_type = rule_type.upper()
+ if not rule_type or not name:
+            # Users should never see this, but let's cover ourselves
+ self.module.fail_json(msg='Rule identifier parameters missing')
+ return dict(RuleGroupName=name, Type=rule_type)
+
+ @staticmethod
+ def _empty_rule_variables():
+ return dict(IPSets=dict(), PortSets=dict())
+
+ @staticmethod
+ def _transform_rule_variables(variables):
+ return {k: dict(Definition=_string_list(v)) for (k, v) in variables.items()}
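+
+    # Illustrative transformation ('192.0.2.0/24' is an example CIDR):
+    #   {'HOME_NET': '192.0.2.0/24'} -> {'HOME_NET': {'Definition': ['192.0.2.0/24']}}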
+
+ def delete(self, name=None, rule_type=None, arn=None):
+
+ id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn)
+ result = self._get_rule_group(**id_params)
+
+ if not result:
+ return False
+
+ self.updated_resource = dict()
+
+ # Rule Group is already in the process of being deleted (takes time)
+ rule_status = self._get_metadata_value('RuleGroupStatus', '').upper()
+ if rule_status == 'DELETING':
+ self._wait_for_deletion()
+ return False
+
+ if self.module.check_mode:
+ self.changed = True
+ return True
+
+ result = self._delete_rule_group(**id_params)
+ self._wait_for_deletion()
+ self.changed |= bool(result)
+ return bool(result)
+
+ def list(self, scope=None):
+ params = dict()
+ if scope:
+ scope = scope.upper()
+ params['Scope'] = scope
+ rule_groups = self._list_rule_groups(**params)
+ if not rule_groups:
+ return list()
+
+ return [r.get('Arn', None) for r in rule_groups]
+
+ def _normalize_rule_variable(self, variable):
+ if variable is None:
+ return None
+ return {k: variable.get(k, dict()).get('Definition', []) for k in variable.keys()}
+
+ def _normalize_rule_variables(self, variables):
+ if variables is None:
+ return None
+ result = dict()
+ ip_sets = self._normalize_rule_variable(variables.get('IPSets', None))
+ if ip_sets:
+ result['ip_sets'] = ip_sets
+ port_sets = self._normalize_rule_variable(variables.get('PortSets', None))
+ if port_sets:
+ result['port_sets'] = port_sets
+ return result
+
+ def _normalize_rule_group(self, rule_group):
+ if rule_group is None:
+ return None
+ rule_variables = self._normalize_rule_variables(rule_group.get('RuleVariables', None))
+ rule_group = self._normalize_boto3_resource(rule_group)
+ if rule_variables is not None:
+ rule_group['rule_variables'] = rule_variables
+ return rule_group
+
+ def _normalize_rule_group_metadata(self, rule_group_metadata):
+ return self._normalize_boto3_resource(rule_group_metadata, add_tags=True)
+
+ def _normalize_rule_group_result(self, result):
+ if result is None:
+ return None
+ rule_group = self._normalize_rule_group(result.get('RuleGroup', None))
+ rule_group_metadata = self._normalize_rule_group_metadata(result.get('RuleGroupMetadata', None))
+ result = camel_dict_to_snake_dict(result)
+ if rule_group:
+ result['rule_group'] = rule_group
+ if rule_group_metadata:
+ result['rule_group_metadata'] = rule_group_metadata
+ return result
+
+ def _normalize_resource(self, resource):
+ return self._normalize_rule_group_result(resource)
+
+ def get_rule_group(self, name=None, rule_type=None, arn=None):
+
+ id_params = self._get_id_params(name=name, rule_type=rule_type, arn=arn)
+ result = self._get_rule_group(**id_params)
+
+ if not result:
+ return None
+
+ rule_group = self._normalize_rule_group_result(result)
+ return rule_group
+
+ def set_description(self, description):
+ return self._set_metadata_value('Description', description)
+
+ def set_capacity(self, capacity):
+ return self._set_metadata_value(
+ 'Capacity', capacity,
+ description="Reserved Capacity", immutable=True)
+
+ def _set_rule_option(self, option_name, description, value, immutable=False, default_value=None):
+ if value is None:
+ return False
+
+ rule_options = deepcopy(self._get_resource_value('StatefulRuleOptions', dict()))
+ if value == rule_options.get(option_name, default_value):
+ return False
+ if immutable and self.original_resource:
+            self.module.fail_json(msg='{0} cannot be updated after creation'
+ .format(description))
+
+ rule_options[option_name] = value
+
+ return self._set_resource_value('StatefulRuleOptions', rule_options)
+
+ def set_rule_order(self, order):
+ RULE_ORDER_MAP = {
+ 'default': 'DEFAULT_ACTION_ORDER',
+ 'strict': 'STRICT_ORDER',
+ }
+ value = RULE_ORDER_MAP.get(order)
+ changed = self._set_rule_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER')
+ self.changed |= changed
+ return changed
+
+ def _set_rule_variables(self, set_name, variables, purge):
+ if variables is None:
+ return False
+
+ variables = self._transform_rule_variables(variables)
+
+ all_variables = deepcopy(self._get_resource_value('RuleVariables', self._empty_rule_variables()))
+
+ current_variables = all_variables.get(set_name, dict())
+ updated_variables = _merge_dict(current_variables, variables, purge)
+
+ if current_variables == updated_variables:
+ return False
+
+ all_variables[set_name] = updated_variables
+
+ return self._set_resource_value('RuleVariables', all_variables)
+
+ def set_ip_variables(self, variables, purge):
+ return self._set_rule_variables('IPSets', variables, purge)
+
+ def set_port_variables(self, variables, purge):
+ return self._set_rule_variables('PortSets', variables, purge)
+
+ def _set_rule_source(self, rule_type, rules):
+ if not rules:
+ return False
+ conflicting_types = self.RULE_TYPES.difference({rule_type})
+ rules_source = deepcopy(self._get_resource_value('RulesSource', dict()))
+ current_keys = set(rules_source.keys())
+ conflicting_rule_type = conflicting_types.intersection(current_keys)
+ if conflicting_rule_type:
+ self.module.fail_json('Unable to add {0} rules, {1} rules already set'
+ .format(rule_type, " and ".join(conflicting_rule_type)))
+
+ original_rules = rules_source.get(rule_type, None)
+ if rules == original_rules:
+ return False
+ rules_source[rule_type] = rules
+ return self._set_resource_value('RulesSource', rules_source)
+
+ def set_rule_string(self, rule):
+ if rule is None:
+ return False
+ if not rule:
+ self.module.fail_json('Rule string must include at least one rule')
+
+ rule = "\n".join(_string_list(rule))
+ return self._set_rule_source('RulesString', rule)
+
+ def set_domain_list(self, options):
+ if not options:
+ return False
+ changed = False
+ domain_names = options.get('domain_names')
+ home_net = options.get('source_ips', None)
+ action = options.get('action')
+ filter_http = options.get('filter_http', False)
+ filter_https = options.get('filter_https', False)
+
+ if home_net:
+            # Seems a little kludgy, but the HOME_NET IP variable is how you
+ # configure which source CIDRs the traffic should be filtered for.
+ changed |= self.set_ip_variables(dict(HOME_NET=home_net), purge=True)
+ else:
+ self.set_ip_variables(dict(), purge=True)
+
+ # Perform some transformations
+ target_types = []
+ if filter_http:
+ target_types.append('HTTP_HOST')
+ if filter_https:
+ target_types.append('TLS_SNI')
+
+ if action == 'allow':
+ action = 'ALLOWLIST'
+ else:
+ action = 'DENYLIST'
+
+ # Finally build the 'rule'
+ rule = dict(
+ Targets=domain_names,
+ TargetTypes=target_types,
+ GeneratedRulesType=action,
+ )
+ changed |= self._set_rule_source('RulesSourceList', rule)
+ return changed
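+
+    # Illustrative result for an example input of
+    # options={'domain_names': ['example.com'], 'action': 'allow', 'filter_https': True}:
+    #   RulesSourceList = {'Targets': ['example.com'], 'TargetTypes': ['TLS_SNI'],
+    #                      'GeneratedRulesType': 'ALLOWLIST'}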
+
+ def _format_rule_options(self, options, sid):
+ formatted_options = []
+ opt = dict(Keyword='sid:{0}'.format(sid))
+ formatted_options.append(opt)
+ if options:
+ for option in sorted(options.keys()):
+ opt = dict(Keyword=option)
+ settings = options.get(option)
+ if settings:
+ opt['Settings'] = _string_list(settings)
+ formatted_options.append(opt)
+ return formatted_options
+
+ def _format_stateful_rule(self, rule):
+ options = self._format_rule_options(
+ rule.get('rule_options', dict()),
+ rule.get('sid'),
+ )
+ formatted_rule = dict(
+ Action=rule.get('action').upper(),
+ RuleOptions=options,
+ Header=dict(
+ Protocol=rule.get('protocol').upper(),
+ Source=rule.get('source'),
+ SourcePort=rule.get('source_port'),
+ Direction=rule.get('direction').upper(),
+ Destination=rule.get('destination'),
+ DestinationPort=rule.get('destination_port'),
+ ),
+ )
+ return formatted_rule
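+
+    # Illustrative formatting of a minimal rule (all values are examples):
+    #   {'action': 'pass', 'protocol': 'tcp', 'source': 'any', 'source_port': 'any',
+    #    'direction': 'forward', 'destination': 'any', 'destination_port': 'any',
+    #    'sid': 1}
+    # becomes:
+    #   {'Action': 'PASS', 'RuleOptions': [{'Keyword': 'sid:1'}],
+    #    'Header': {'Protocol': 'TCP', 'Source': 'any', 'SourcePort': 'any',
+    #               'Direction': 'FORWARD', 'Destination': 'any',
+    #               'DestinationPort': 'any'}}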
+
+ def set_rule_list(self, rules):
+ if rules is None:
+ return False
+ if not rules:
+ self.module.fail_json(msg='Rule list must include at least one rule')
+
+ formatted_rules = [self._format_stateful_rule(r) for r in rules]
+ return self._set_rule_source('StatefulRules', formatted_rules)
+
+ def _do_create_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ params = metadata
+ params.update(self._get_id_params())
+ params['RuleGroup'] = resource
+ response = self._create_rule_group(**params)
+ return bool(response)
+
+ def _generate_updated_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ metadata.update(self._get_id_params())
+ updated_resource = dict(
+ RuleGroup=resource,
+ RuleGroupMetadata=metadata
+ )
+ return updated_resource
+
+ def _flush_create(self):
+ # Apply some pre-flight tests before trying to run the creation.
+ if 'Capacity' not in self._metadata_updates:
+ self.module.fail_json('Capacity must be provided when creating a new Rule Group')
+
+ rules_source = self._get_resource_value('RulesSource', dict())
+ rule_type = self.RULE_TYPES.intersection(set(rules_source.keys()))
+ if len(rule_type) != 1:
+ self.module.fail_json('Exactly one of rule strings, domain list or rule list'
+ ' must be provided when creating a new rule group',
+ rule_type=rule_type, keys=self._resource_updates.keys(),
+ types=self.RULE_TYPES)
+
+ return super(NetworkFirewallRuleManager, self)._flush_create()
+
+ def _do_update_resource(self):
+ filtered_metadata_updates = self._filter_immutable_metadata_attributes(self._metadata_updates)
+ filtered_resource_updates = self._resource_updates
+
+ if not filtered_resource_updates and not filtered_metadata_updates:
+ return False
+
+ metadata, resource = self._merge_changes()
+
+ params = metadata
+ params.update(self._get_id_params())
+ params['RuleGroup'] = resource
+
+ if not self.module.check_mode:
+ response = self._update_rule_group(**params)
+
+ return True
+
+ def _flush_update(self):
+ changed = False
+ changed |= self._flush_tagging()
+ changed |= super(NetworkFirewallRuleManager, self)._flush_update()
+ return changed
+
+ def _get_rule_group(self, **params):
+ result = self._describe_rule_group(**params)
+ if not result:
+ return None
+
+ rule_group = result.get('RuleGroup', None)
+ metadata = result.get('RuleGroupMetadata', None)
+ self._preupdate_resource = deepcopy(rule_group)
+ self._preupdate_metadata = deepcopy(metadata)
+ return dict(RuleGroup=rule_group, RuleGroupMetadata=metadata)
+
+ def get_resource(self):
+ return self.get_rule_group()
+
+ def _do_creation_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_rule_group_active(**all_params)
+
+ def _do_deletion_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_rule_group_deleted(**all_params)
+
+
+class NetworkFirewallPolicyManager(NFPolicyBoto3Mixin, NFRuleGroupBoto3Mixin, BaseNetworkFirewallManager):
+
+ name = None
+ arn = None
+ _group_name_cache = None
+
+ def __init__(self, module, name=None, arn=None):
+ super().__init__(module)
+ # Name parameter is unique (by region) and can not be modified.
+ self.name = name
+ self.arn = arn
+ if self.name or self.arn:
+ policy = deepcopy(self.get_policy())
+ self.original_resource = policy
+
+ def _extra_error_output(self):
+ output = super(NetworkFirewallPolicyManager, self)._extra_error_output()
+ if self.name:
+ output['FirewallPolicyName'] = self.name
+ if self.arn:
+ output['FirewallPolicyArn'] = self.arn
+ return output
+
+ def _filter_immutable_metadata_attributes(self, metadata):
+ metadata = super(NetworkFirewallPolicyManager, self)._filter_immutable_metadata_attributes(metadata)
+ metadata.pop('FirewallPolicyArn', None)
+ metadata.pop('FirewallPolicyName', None)
+ metadata.pop('FirewallPolicyId', None)
+ metadata.pop('FirewallPolicyStatus', None)
+ metadata.pop('ConsumedStatelessRuleCapacity', None)
+ metadata.pop('ConsumedStatefulRuleCapacity', None)
+ metadata.pop('Tags', None)
+ metadata.pop('NumberOfAssociations', None)
+ return metadata
+
+ def _get_preupdate_arn(self):
+ return self._get_metadata_value('FirewallPolicyArn')
+
+ def _get_id_params(self, name=None, arn=None):
+ if arn:
+ return dict(FirewallPolicyArn=arn)
+ if self.arn:
+ return dict(FirewallPolicyArn=self.arn)
+ if not name:
+ name = self.name
+ return dict(FirewallPolicyName=name)
+
+ def delete(self, name=None, arn=None):
+
+ id_params = self._get_id_params(name=name, arn=arn)
+ result = self._get_policy(**id_params)
+
+ if not result:
+ return False
+
+ self.updated_resource = dict()
+
+ # Policy is already in the process of being deleted (takes time)
+ rule_status = self._get_metadata_value('FirewallPolicyStatus', '').upper()
+ if rule_status == 'DELETING':
+ self._wait_for_deletion()
+ return False
+
+ if self.module.check_mode:
+ self.changed = True
+ return True
+
+ result = self._delete_policy(**id_params)
+ self._wait_for_deletion()
+ self.changed |= bool(result)
+ return bool(result)
+
+ def list(self):
+ params = dict()
+ policies = self._list_policies(**params)
+ if not policies:
+ return list()
+
+ return [p.get('Arn', None) for p in policies]
+
+ @property
+ def _rule_group_name_cache(self):
+ if self._group_name_cache:
+ return self._group_name_cache
+ results = self._list_rule_groups()
+ if not results:
+ return dict()
+
+ group_cache = {r.get('Name', None): r.get('Arn', None) for r in results}
+ self._group_name_cache = group_cache
+ return group_cache
+
+ @property
+ def _stateful_rule_order(self):
+ engine_options = self._get_resource_value('StatefulEngineOptions', None)
+ if not engine_options:
+ return 'DEFAULT_ACTION_ORDER'
+ return engine_options.get('RuleOrder', 'DEFAULT_ACTION_ORDER')
+
+ def _canonicalize_rule_group(self, name, group_type):
+ """Iterates through a mixed list of ARNs and Names converting them to
+ ARNs. Also checks that the group type matches the provided group_type.
+ """
+ arn = None
+ # : is only valid in ARNs
+ if ':' in name:
+ arn = name
+ else:
+ arn = self._rule_group_name_cache.get(name, None)
+ if not arn:
+ self.module.fail_json('Unable to fetch ARN for rule group', name=name,
+ group_name_cache=self._rule_group_name_cache)
+ arn_info = parse_aws_arn(arn)
+ if not arn_info:
+ self.module.fail_json('Unable to parse ARN for rule group', arn=arn, arn_info=arn_info)
+ arn_type = arn_info['resource'].split('/')[0]
+ if arn_type != group_type:
+ self.module.fail_json('Rule group not of expected type', name=name,
+ arn=arn, expected_type=group_type, found_type=arn_type)
+
+ return arn
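+
+    # Hypothetical sketch: short names are resolved via the rule-group name
+    # cache, while full ARNs are type-checked and passed through.
+    #   self._canonicalize_rule_group('my-group', 'stateful-rulegroup')
+    #   # -> 'arn:aws:network-firewall:...:stateful-rulegroup/my-group'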
+
+ def _format_rulegroup_references(self, groups, strict_order):
+        formatted_groups = list()
+        for idx, arn in enumerate(groups):
+            entry = dict(ResourceArn=arn)
+            if strict_order:
+                entry['Priority'] = idx + 1
+            formatted_groups.append(entry)
+        return formatted_groups
+
+ def _rulegroup_references_list(self, groups):
+ return [g.get('ResourceArn') for g in groups]
+
+ def _sorted_rulegroup_references_list(self, groups):
+ sorted_list = sorted(groups, key=lambda g: g.get('Priority', None))
+ return self._rulegroup_references_list(sorted_list)
+
+ def _compare_rulegroup_references(self, current_groups, desired_groups, strict_order):
+ if current_groups is None:
+ return False
+ if strict_order:
+ current_groups = self._sorted_rulegroup_references_list(current_groups)
+ return current_groups == desired_groups
+ else:
+ current_groups = self._rulegroup_references_list(current_groups)
+ return set(current_groups) == set(desired_groups)
+
+ def _set_engine_option(self, option_name, description, value, immutable=False, default_value=None):
+ if value is None:
+ return False
+
+ engine_options = deepcopy(self._get_resource_value('StatefulEngineOptions', dict()))
+ if value == engine_options.get(option_name, default_value):
+ return False
+ if immutable and self.original_resource:
+            self.module.fail_json(msg='{0} cannot be updated after creation'
+ .format(description))
+
+ engine_options[option_name] = value
+ return self._set_resource_value('StatefulEngineOptions', engine_options)
+
+ def set_stateful_rule_order(self, order):
+ RULE_ORDER_MAP = {
+ 'default': 'DEFAULT_ACTION_ORDER',
+ 'strict': 'STRICT_ORDER',
+ }
+ value = RULE_ORDER_MAP.get(order)
+ changed = self._set_engine_option('RuleOrder', 'Rule order', value, True, 'DEFAULT_ACTION_ORDER')
+ self.changed |= changed
+ return changed
+
+ def _set_rule_groups(self, groups, group_type, parameter_name, strict_order):
+ if groups is None:
+ return False
+ group_arns = [self._canonicalize_rule_group(g, group_type) for g in groups]
+ current_groups = self._get_resource_value(parameter_name)
+ if self._compare_rulegroup_references(current_groups, group_arns, strict_order):
+ return False
+        formatted_groups = self._format_rulegroup_references(group_arns, strict_order)
+        return self._set_resource_value(parameter_name, formatted_groups)
+
+ def set_stateful_rule_groups(self, groups):
+ strict_order = self._stateful_rule_order == 'STRICT_ORDER'
+ return self._set_rule_groups(groups, 'stateful-rulegroup',
+ 'StatefulRuleGroupReferences',
+ strict_order)
+
+ def set_stateless_rule_groups(self, groups):
+ return self._set_rule_groups(groups, 'stateless-rulegroup',
+ 'StatelessRuleGroupReferences', True)
+
+ def set_default_actions(self, key, actions, valid_actions=None):
+ if actions is None:
+ return False
+
+ invalid_actions = list(set(actions) - set(valid_actions or []))
+ if valid_actions and invalid_actions:
+ self.module.fail_json(
+ msg='{0} contains invalid actions'.format(key),
+ valid_actions=valid_actions, invalid_actions=invalid_actions,
+ actions=actions)
+
+ return self._set_resource_value(key, actions)
+
+ def set_stateful_default_actions(self, actions):
+ if actions is None:
+ return False
+ if self._stateful_rule_order != 'STRICT_ORDER':
+ self.module.fail_json(msg='Stateful default actions can only be set when using strict rule order')
+
+ valid_actions = [
+ 'aws:drop_strict', 'aws:drop_established',
+ 'aws:alert_strict', 'aws:alert_established'
+ ]
+ return self.set_default_actions('StatefulDefaultActions', actions, valid_actions)
+
+ def _set_stateless_default_actions(self, key, actions):
+ valid_actions = [
+ 'aws:pass', 'aws:drop', 'aws:forward_to_sfe'
+ ]
+ custom_actions = self._get_resource_value('StatelessCustomActions', dict())
+ custom_action_names = [a['ActionName'] for a in custom_actions]
+ valid_actions.extend(custom_action_names)
+ return self.set_default_actions(key, actions, valid_actions)
+
+ def set_stateless_default_actions(self, actions):
+ return self._set_stateless_default_actions('StatelessDefaultActions', actions)
+
+ def set_stateless_fragment_default_actions(self, actions):
+ return self._set_stateless_default_actions('StatelessFragmentDefaultActions', actions)
+
+ def _normalize_policy(self, policy):
+ if policy is None:
+ return None
+ policy = self._normalize_boto3_resource(policy)
+ return policy
+
+ def _normalize_policy_metadata(self, policy_metadata):
+ if policy_metadata is None:
+ return None
+ return self._normalize_boto3_resource(policy_metadata, add_tags=True)
+
+ def _normalize_policy_result(self, result):
+ if result is None:
+ return None
+ policy = self._normalize_policy(result.get('FirewallPolicy', None))
+ policy_metadata = self._normalize_policy_metadata(result.get('FirewallPolicyMetadata', None))
+ result = dict()
+ if policy:
+ result['policy'] = policy
+ if policy_metadata:
+ result['policy_metadata'] = policy_metadata
+ return result
+
+ def _normalize_resource(self, resource):
+ return self._normalize_policy_result(resource)
+
+ def get_policy(self, name=None, arn=None):
+
+ id_params = self._get_id_params(name=name, arn=arn)
+ result = self._get_policy(**id_params)
+
+ if not result:
+ return None
+
+ policy = self._normalize_policy_result(result)
+ return policy
+
+ def _format_custom_action(self, action):
+ formatted_action = dict(
+ ActionName=action['name'],
+ )
+ action_definition = dict()
+ if 'publish_metric_dimension_value' in action:
+ values = _string_list(action['publish_metric_dimension_value'])
+ dimensions = [dict(Value=v) for v in values]
+ action_definition['PublishMetricAction'] = dict(
+ Dimensions=dimensions,
+ )
+ if action_definition:
+ formatted_action['ActionDefinition'] = action_definition
+ return formatted_action
+
+ def _custom_action_map(self, actions):
+ return {a['ActionName']: a['ActionDefinition'] for a in actions}
+
+ def set_custom_stateless_actions(self, actions, purge_actions):
+ if actions is None:
+ return False
+ new_action_list = [self._format_custom_action(a) for a in actions]
+ new_action_map = self._custom_action_map(new_action_list)
+
+ existing_action_map = self._custom_action_map(
+ self._get_resource_value('StatelessCustomActions', [])
+ )
+ if purge_actions:
+ desired_action_map = dict()
+ else:
+ desired_action_map = deepcopy(existing_action_map)
+ desired_action_map.update(new_action_map)
+
+ if desired_action_map == existing_action_map:
+ return False
+
+ action_list = [dict(ActionName=k, ActionDefinition=v) for k, v in desired_action_map.items()]
+        return self._set_resource_value('StatelessCustomActions', action_list)
+
+ def set_description(self, description):
+ return self._set_metadata_value('Description', description)
+
+ def _do_create_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ params = metadata
+ params.update(self._get_id_params())
+ params['FirewallPolicy'] = resource
+ response = self._create_policy(**params)
+ return bool(response)
+
+ def _generate_updated_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ metadata.update(self._get_id_params())
+ updated_resource = dict(
+ FirewallPolicy=resource,
+ FirewallPolicyMetadata=metadata
+ )
+ return updated_resource
+
+ def _flush_create(self):
+ # Set some defaults
+ if self._get_resource_value('StatelessDefaultActions', None) is None:
+ self._set_resource_value('StatelessDefaultActions', ['aws:forward_to_sfe'])
+ if self._get_resource_value('StatelessFragmentDefaultActions', None) is None:
+ self._set_resource_value('StatelessFragmentDefaultActions', ['aws:forward_to_sfe'])
+ return super(NetworkFirewallPolicyManager, self)._flush_create()
+
+ def _do_update_resource(self):
+ filtered_metadata_updates = self._filter_immutable_metadata_attributes(self._metadata_updates)
+ filtered_resource_updates = self._resource_updates
+
+ if not filtered_resource_updates and not filtered_metadata_updates:
+ return False
+
+ metadata, resource = self._merge_changes()
+
+ params = metadata
+ params.update(self._get_id_params())
+ params['FirewallPolicy'] = resource
+
+ if not self.module.check_mode:
+ response = self._update_policy(**params)
+
+ return True
+
+ def _flush_update(self):
+ changed = False
+ changed |= self._flush_tagging()
+ changed |= super(NetworkFirewallPolicyManager, self)._flush_update()
+ return changed
+
+ def _get_policy(self, **params):
+ result = self._describe_policy(**params)
+ if not result:
+ return None
+
+ policy = result.get('FirewallPolicy', None)
+ # During deletion, there's a phase where this will return Metadata but
+ # no policy
+ if policy is None:
+ policy = dict()
+
+ metadata = result.get('FirewallPolicyMetadata', None)
+ self._preupdate_resource = deepcopy(policy)
+ self._preupdate_metadata = deepcopy(metadata)
+ return dict(FirewallPolicy=policy, FirewallPolicyMetadata=metadata)
+
+ def get_resource(self):
+ return self.get_policy()
+
+ def _do_creation_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_policy_active(**all_params)
+
+ def _do_deletion_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_policy_deleted(**all_params)
+
+
+class NetworkFirewallManager(NFFirewallBoto3Mixin, NFPolicyBoto3Mixin, BaseNetworkFirewallManager):
+
+ name = None
+ arn = None
+ ec2_manager = None
+ _subnet_updates = None
+ _policy_list_cache = None
+ _slow_start_change = False
+
+ def __init__(self, module, name=None, arn=None):
+ super().__init__(module)
+ # Name parameter is unique (by region) and can not be modified.
+ self.name = name
+ self.arn = arn
+ self.ec2_manager = BaseEc2Manager(module=module)
+ self._subnet_updates = dict()
+ if self.name or self.arn:
+ firewall = deepcopy(self.get_firewall())
+ self.original_resource = firewall
+
+ def _extra_error_output(self):
+ output = super(NetworkFirewallManager, self)._extra_error_output()
+ if self.name:
+ output['FirewallName'] = self.name
+ if self.arn:
+ output['FirewallArn'] = self.arn
+ return output
+
+ def _get_preupdate_arn(self):
+ return self._get_resource_value('FirewallArn')
+
+ def _get_id_params(self, name=None, arn=None):
+ if arn:
+ return dict(FirewallArn=arn)
+ if self.arn:
+ return dict(FirewallArn=self.arn)
+ if not name:
+ name = self.name
+ if not name:
+            # Users should never see this, but let's cover ourselves
+ self.module.fail_json(msg='Firewall identifier parameters missing')
+ return dict(FirewallName=name)
+
+ def delete(self, name=None, arn=None):
+
+ id_params = self._get_id_params(name=name, arn=arn)
+ result = self._get_firewall(**id_params)
+
+ if not result:
+ return False
+
+ self.updated_resource = dict()
+
+ # Firewall is already in the process of being deleted (takes time)
+ firewall_status = self._get_metadata_value('Status', '').upper()
+ if firewall_status == 'DELETING':
+ self._wait_for_deletion()
+ return False
+
+ if self.module.check_mode:
+ self.changed = True
+ return True
+
+ if 'DeleteProtection' in self._resource_updates:
+ self._update_firewall_delete_protection(
+ DeleteProtection=self._resource_updates['DeleteProtection'], **id_params,
+ )
+
+ result = self._delete_firewall(**id_params)
+ self._wait_for_deletion()
+ self.changed |= bool(result)
+ return bool(result)
+
+ def list(self, vpc_ids=None):
+ params = dict()
+ if vpc_ids:
+ params['VpcIds'] = vpc_ids
+ firewalls = self._list_firewalls(**params)
+ if not firewalls:
+ return list()
+
+ return [f.get('FirewallArn', None) for f in firewalls]
+
+ def _normalize_firewall(self, firewall):
+ if firewall is None:
+ return None
+ subnets = [s.get('SubnetId') for s in firewall.get('SubnetMappings', [])]
+ firewall = self._normalize_boto3_resource(firewall, add_tags=True)
+ firewall['subnets'] = subnets
+ return firewall
+
+ def _normalize_sync_state_config(self, policy):
+ return self._normalize_boto3_resource(policy)
+
+ def _normalize_sync_state(self, state):
+ config = {k: self._normalize_sync_state_config(v) for k, v in state.pop('Config', {}).items()}
+ state = self._normalize_boto3_resource(state)
+ state['config'] = config or {}
+ return state
+
+ def _normalize_firewall_metadata(self, firewall_metadata):
+ if firewall_metadata is None:
+ return None
+ states = {k: self._normalize_sync_state(v) for k, v in firewall_metadata.pop('SyncStates', {}).items()}
+ metadata = self._normalize_boto3_resource(firewall_metadata, add_tags=False)
+ metadata['sync_states'] = states or {}
+ return metadata
+
+ def _normalize_firewall_result(self, result):
+ if result is None:
+ return None
+ firewall = self._normalize_firewall(result.get('Firewall', None))
+ firewall_metadata = self._normalize_firewall_metadata(result.get('FirewallMetadata', None))
+ result = camel_dict_to_snake_dict(result)
+ if firewall:
+ result['firewall'] = firewall
+ if firewall_metadata:
+ result['firewall_metadata'] = firewall_metadata
+ return result
+
+ def _normalize_resource(self, resource):
+ return self._normalize_firewall_result(resource)
+
+ def get_firewall(self, name=None, arn=None):
+
+ id_params = self._get_id_params(name=name, arn=arn)
+ result = self._get_firewall(**id_params)
+
+ if not result:
+ return None
+
+ firewall = self._normalize_firewall_result(result)
+ return firewall
+
+ @property
+ def _subnets(self):
+ subnet_mappings = self._get_resource_value('SubnetMappings', [])
+ subnets = [s.get('SubnetId') for s in subnet_mappings]
+ return subnets
+
+ def _subnets_to_vpc(self, subnets, subnet_details=None):
+ if not subnets:
+ return None
+ if not subnet_details:
+ subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(subnets))
+ vpcs = [s.get('VpcId') for s in subnet_details]
+ if len(set(vpcs)) > 1:
+ self.module.fail_json(
+ msg='Firewall subnets may only be in one VPC, multiple VPCs found',
+ vpcs=list(set(vpcs)), subnets=subnet_details)
+ return vpcs[0]
+
+ def _format_subnet_mapping(self, subnets):
+ if not subnets:
+ return []
+ return [dict(SubnetId=s) for s in subnets]
+
+ @property
+ def _policy_name_cache(self):
+ if self._policy_list_cache:
+ return self._policy_list_cache
+ results = self._list_policies()
+ if not results:
+ return dict()
+
+ policy_cache = {p.get('Name', None): p.get('Arn', None) for p in results}
+ self._policy_list_cache = policy_cache
+ return policy_cache
+
+ def _canonicalize_policy(self, name):
+ """Iterates through a mixed list of ARNs and Names converting them to
+ ARNs.
+ """
+ arn = None
+ # : is only valid in ARNs
+ if ':' in name:
+ arn = name
+ else:
+ arn = self._policy_name_cache.get(name, None)
+ if not arn:
+ self.module.fail_json('Unable to fetch ARN for policy', name=name,
+ policy_name_cache=self._policy_name_cache)
+ arn_info = parse_aws_arn(arn)
+ if not arn_info:
+ self.module.fail_json('Unable to parse ARN for policy', arn=arn, arn_info=arn_info)
+ arn_type = arn_info['resource'].split('/')[0]
+ if arn_type != 'firewall-policy':
+ self.module.fail_json('Policy ARN not of expected resource type', name=name,
+ arn=arn, expected_type='firewall-policy', found_type=arn_type)
+
+ return arn
+
+ def set_policy(self, policy):
+ if policy is None:
+ return False
+
+ # Because the canonicalization of a non-ARN policy name will require an API call,
+ # try comparing the current name to the policy name we've been passed.
+ # If they match we don't need to perform the lookup.
+ current_policy = self._get_resource_value('FirewallPolicyArn', None)
+ if current_policy:
+ arn_info = parse_aws_arn(current_policy)
+ current_name = arn_info['resource'].split('/')[-1]
+ if current_name == policy:
+ return False
+
+ policy = self._canonicalize_policy(policy)
+ return self._set_resource_value('FirewallPolicyArn', policy)
+
+ def set_subnets(self, subnets, purge=True):
+ if subnets is None:
+ return False
+ current_subnets = set(self._subnets)
+ desired_subnets = set(subnets)
+ if not purge:
+ desired_subnets = desired_subnets.union(current_subnets)
+
+ # We don't need to perform EC2 lookups if we're not changing anything.
+ if current_subnets == desired_subnets:
+ return False
+
+ subnet_details = self.ec2_manager._describe_subnets(SubnetIds=list(desired_subnets))
+ vpc = self._subnets_to_vpc(desired_subnets, subnet_details)
+ self._set_resource_value('VpcId', vpc, description='firewall VPC', immutable=True)
+
+ azs = [s.get('AvailabilityZoneId') for s in subnet_details]
+ if len(azs) != len(set(azs)):
+ self.module.fail_json(
+                msg='Only one subnet per availability zone may be set.',
+ availability_zones=azs, subnets=subnet_details)
+
+ subnets_to_add = list(desired_subnets.difference(current_subnets))
+ subnets_to_remove = list(current_subnets.difference(desired_subnets))
+ self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove)
+ self._set_resource_value('SubnetMappings', self._format_subnet_mapping(desired_subnets))
+ return True
+
+ def set_policy_change_protection(self, protection):
+ return self._set_resource_value('FirewallPolicyChangeProtection', protection)
+
+ def set_subnet_change_protection(self, protection):
+ return self._set_resource_value('SubnetChangeProtection', protection)
+
+ def set_delete_protection(self, protection):
+ return self._set_resource_value('DeleteProtection', protection)
+
+ def set_description(self, description):
+ return self._set_resource_value('Description', description)
+
+ def _do_create_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ params = metadata
+ params.update(self._get_id_params())
+ params.update(resource)
+ response = self._create_firewall(**params)
+ return bool(response)
+
+ def _generate_updated_resource(self):
+ metadata, resource = self._merge_changes(filter_metadata=False)
+ resource.update(self._get_id_params())
+ updated_resource = dict(
+ Firewall=resource,
+ FirewallMetadata=metadata
+ )
+ return updated_resource
+
+ def _flush_create(self):
+ # # Apply some pre-flight tests before trying to run the creation.
+ # if 'Capacity' not in self._metadata_updates:
+ # self.module.fail_json('Capacity must be provided when creating a new Rule Group')
+
+ return super(NetworkFirewallManager, self)._flush_create()
+
+ def _do_update_resource(self):
+ # There are no 'metadata' components of a Firewall to update
+ resource_updates = self._resource_updates
+ if not resource_updates:
+ return False
+ if self.module.check_mode:
+ return True
+
+ id_params = self._get_id_params()
+
+ # There's no tool for 'bulk' updates, we need to iterate through these
+ # one at a time...
+ if 'Description' in resource_updates:
+ self._update_firewall_description(
+ Description=resource_updates['Description'], **id_params,
+ )
+ if 'DeleteProtection' in resource_updates:
+ self._update_firewall_delete_protection(
+ DeleteProtection=resource_updates['DeleteProtection'], **id_params,
+ )
+
+ # Disable Change Protection...
+ # When disabling change protection, do so *before* making changes
+ if 'FirewallPolicyChangeProtection' in resource_updates:
+ if not self._get_resource_value('FirewallPolicyChangeProtection'):
+ self._update_firewall_policy_change_protection(
+ FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params,
+ )
+ if 'SubnetChangeProtection' in resource_updates:
+ if not self._get_resource_value('SubnetChangeProtection'):
+ self._update_subnet_change_protection(
+ SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params,
+ )
+
+ # General Changes
+ if 'SubnetMappings' in resource_updates:
+ self._slow_start_change = True
+ subnets_to_add = self._subnet_updates.get('add', None)
+ subnets_to_remove = self._subnet_updates.get('remove', None)
+ if subnets_to_remove:
+ self._disassociate_subnets(
+ SubnetIds=subnets_to_remove, **id_params)
+ if subnets_to_add:
+ subnets_to_add = self._format_subnet_mapping(subnets_to_add)
+ self._associate_subnets(
+ SubnetMappings=subnets_to_add, **id_params)
+
+ if 'FirewallPolicyArn' in resource_updates:
+ self._slow_start_change = True
+ self._associate_firewall_policy(
+ FirewallPolicyArn=resource_updates['FirewallPolicyArn'],
+ **id_params
+ )
+
+ # Enable Change Protection.
+ # When enabling change protection, do so *after* making changes
+ if 'FirewallPolicyChangeProtection' in resource_updates:
+ if self._get_resource_value('FirewallPolicyChangeProtection'):
+ self._update_firewall_policy_change_protection(
+ FirewallPolicyChangeProtection=resource_updates['FirewallPolicyChangeProtection'], **id_params,
+ )
+ if 'SubnetChangeProtection' in resource_updates:
+ if self._get_resource_value('SubnetChangeProtection'):
+ self._update_subnet_change_protection(
+ SubnetChangeProtection=resource_updates['SubnetChangeProtection'], **id_params,
+ )
+ return True
+
+ def _flush_update(self):
+ changed = False
+ changed |= self._flush_tagging()
+ changed |= super(NetworkFirewallManager, self)._flush_update()
+ self._subnet_updates = dict()
+ self._slow_start_change = False
+ return changed
+
+ def _get_firewall(self, **params):
+ result = self._describe_firewall(**params)
+ if not result:
+ return None
+
+ firewall = result.get('Firewall', None)
+ metadata = result.get('FirewallMetadata', None)
+ self._preupdate_resource = deepcopy(firewall)
+ self._preupdate_metadata = deepcopy(metadata)
+ return dict(Firewall=firewall, FirewallMetadata=metadata)
+
+ def get_resource(self):
+ return self.get_firewall()
+
+ def _do_creation_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_firewall_active(**all_params)
+
+ def _do_deletion_wait(self, **params):
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_firewall_deleted(**all_params)
+
+ def _do_update_wait(self, **params):
+        # It takes a couple of seconds before the firewall starts to update
+        # the subnets and policies, so pause if we know we've changed them.
+        # We'll be waiting substantially longer than this overall...
+ if self._slow_start_change:
+ time.sleep(4)
+ all_params = self._get_id_params()
+ all_params.update(params)
+ return self._wait_firewall_updated(**all_params)
+
+    # Unlike RuleGroups and Policies, Firewalls for some reason have their tags
+    # set directly on the resource.
+ def _set_tag_values(self, desired_tags):
+ return self._set_resource_value('Tags', ansible_dict_to_boto3_tag_list(desired_tags))
+
+ def _get_tag_values(self):
+ return self._get_resource_value('Tags', [])
diff --git a/ansible_collections/community/aws/plugins/module_utils/opensearch.py b/ansible_collections/community/aws/plugins/module_utils/opensearch.py
new file mode 100644
index 000000000..8189378e5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/opensearch.py
@@ -0,0 +1,280 @@
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from copy import deepcopy
+import datetime
+import functools
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict,
+ compare_aws_tags,
+)
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import (
+ boto3_tag_list_to_ansible_dict,
+)
+from ansible.module_utils.six import string_types
+
+
+def get_domain_status(client, module, domain_name):
+ """
+ Get the status of an existing OpenSearch cluster.
+ """
+ try:
+ response = client.describe_domain(DomainName=domain_name)
+ except is_boto3_error_code("ResourceNotFoundException"):
+ return None
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name))
+ return response["DomainStatus"]
+
+
+def get_domain_config(client, module, domain_name):
+ """
+ Get the configuration of an existing OpenSearch cluster, convert the data
+ such that it can be used as input parameter to client.update_domain().
+ The status info is removed.
+ The returned config includes the 'EngineVersion' property, it needs to be removed
+ from the dict before invoking client.update_domain().
+
+ Return (domain_config, domain_arn) or (None, None) if the domain does not exist.
+ """
+ try:
+ response = client.describe_domain_config(DomainName=domain_name)
+ except is_boto3_error_code("ResourceNotFoundException"):
+ return (None, None)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name))
+ domain_config = {}
+ arn = None
+ if response is not None:
+ for k in response["DomainConfig"]:
+ domain_config[k] = response["DomainConfig"][k]["Options"]
+ domain_config["DomainName"] = domain_name
+    # If the ES cluster is attached to the Internet, the "VPCOptions" property is not present.
+ if "VPCOptions" in domain_config:
+ # The "VPCOptions" returned by the describe_domain_config API has
+ # additional attributes that would cause an error if sent in the HTTP POST body.
+ dc = {}
+ if "SubnetIds" in domain_config["VPCOptions"]:
+ dc["SubnetIds"] = deepcopy(domain_config["VPCOptions"]["SubnetIds"])
+ if "SecurityGroupIds" in domain_config["VPCOptions"]:
+ dc["SecurityGroupIds"] = deepcopy(domain_config["VPCOptions"]["SecurityGroupIds"])
+ domain_config["VPCOptions"] = dc
+ # The "StartAt" property is converted to datetime, but when doing comparisons it should
+ # be in the string format "YYYY-MM-DD".
+ for s in domain_config["AutoTuneOptions"]["MaintenanceSchedules"]:
+ if isinstance(s["StartAt"], datetime.datetime):
+ s["StartAt"] = s["StartAt"].strftime("%Y-%m-%d")
+ # Provisioning of "AdvancedOptions" is not supported by this module yet.
+ domain_config.pop("AdvancedOptions", None)
+
+ # Get the ARN of the OpenSearch cluster.
+ domain = get_domain_status(client, module, domain_name)
+ if domain is not None:
+ arn = domain["ARN"]
+ return (domain_config, arn)
+
+
+def normalize_opensearch(client, module, domain):
+ """
+ Merge the input domain object with tags associated with the domain,
+ convert the attributes from camel case to snake case, and return the object.
+ """
+ try:
+ domain["Tags"] = boto3_tag_list_to_ansible_dict(
+ client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, "Couldn't get tags for domain %s" % domain["domain_name"]
+ )
+ except KeyError:
+ module.fail_json(msg=str(domain))
+
+ return camel_dict_to_snake_dict(domain, ignore_list=["Tags"])
+
+
+def wait_for_domain_status(client, module, domain_name, waiter_name):
+ if not module.params["wait"]:
+ return
+ timeout = module.params["wait_timeout"]
+ deadline = time.time() + timeout
+ status_msg = ""
+ while time.time() < deadline:
+ status = get_domain_status(client, module, domain_name)
+ if status is None:
+ status_msg = "Not Found"
+ if waiter_name == "domain_deleted":
+ return
+ else:
+ status_msg = "Created: {0}. Processing: {1}. UpgradeProcessing: {2}".format(
+ status["Created"],
+ status["Processing"],
+ status["UpgradeProcessing"],
+ )
+ if (
+ waiter_name == "domain_available"
+ and status["Created"]
+ and not status["Processing"]
+ and not status["UpgradeProcessing"]
+ ):
+ return
+ time.sleep(15)
+    # Timeout occurred.
+ module.fail_json(
+ msg=f"Timeout waiting for wait state '{waiter_name}'. {status_msg}"
+ )
+
+
+def parse_version(engine_version):
+ '''
+ Parse the engine version, which should be Elasticsearch_X.Y or OpenSearch_X.Y
+ Return dict { 'engine_type': engine_type, 'major': major, 'minor': minor }
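+
+    For example (illustrative): parse_version('OpenSearch_1.1') returns
+    {'engine_type': 'OpenSearch', 'major': 1, 'minor': 1}.
+    Returns None if the string cannot be parsed.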
+ '''
+ version = engine_version.split("_")
+ if len(version) != 2:
+ return None
+ semver = version[1].split(".")
+ if len(semver) != 2:
+ return None
+ engine_type = version[0]
+ if engine_type not in ['Elasticsearch', 'OpenSearch']:
+ return None
+ if not (semver[0].isdigit() and semver[1].isdigit()):
+ return None
+ major = int(semver[0])
+ minor = int(semver[1])
+ return {'engine_type': engine_type, 'major': major, 'minor': minor}
+
+
+def compare_domain_versions(version1, version2):
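+    # cmp-style comparator: returns -1, 0 or 1. Any Elasticsearch version sorts
+    # below any OpenSearch version, e.g. (illustrative)
+    # compare_domain_versions('Elasticsearch_7.10', 'OpenSearch_1.0') == -1.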
+ supported_engines = {
+ 'Elasticsearch': 1,
+ 'OpenSearch': 2,
+ }
+ if isinstance(version1, string_types):
+ version1 = parse_version(version1)
+ if isinstance(version2, string_types):
+ version2 = parse_version(version2)
+ if version1 is None and version2 is not None:
+ return -1
+ elif version1 is not None and version2 is None:
+ return 1
+ elif version1 is None and version2 is None:
+ return 0
+ e1 = supported_engines.get(version1.get('engine_type'))
+ e2 = supported_engines.get(version2.get('engine_type'))
+ if e1 < e2:
+ return -1
+ elif e1 > e2:
+ return 1
+ else:
+ if version1.get('major') < version2.get('major'):
+ return -1
+ elif version1.get('major') > version2.get('major'):
+ return 1
+ else:
+ if version1.get('minor') < version2.get('minor'):
+ return -1
+ elif version1.get('minor') > version2.get('minor'):
+ return 1
+ else:
+ return 0
+
+
+def get_target_increment_version(client, module, domain_name, target_version):
+ """
+    Returns the highest compatible version which is less than or equal to target_version.
+    When upgrading a domain from version V1 to V2, it may not be possible to upgrade
+    directly from V1 to V2; the domain may have to be upgraded through intermediate
+    versions. For example, it's not possible to upgrade directly from Elasticsearch 5.5
+    to 7.10.
+    Return None if there is no such version.
+ """
+ api_compatible_versions = None
+ try:
+ api_compatible_versions = client.get_compatible_versions(DomainName=domain_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't get compatible versions for domain {0}".format(
+ domain_name),
+ )
+ compat = api_compatible_versions.get('CompatibleVersions')
+    if not compat:
+        module.fail_json(
+            "Unable to determine list of compatible versions",
+            compatible_versions=api_compatible_versions)
+ if compat[0].get("TargetVersions") is None:
+ module.fail_json(
+ "No compatible versions found",
+ compatible_versions=api_compatible_versions)
+ compatible_versions = []
+ for v in compat[0].get("TargetVersions"):
+ if target_version == v:
+ # It's possible to upgrade directly to the target version.
+ return target_version
+ semver = parse_version(v)
+ if semver is not None:
+ compatible_versions.append(semver)
+ # No direct upgrade is possible. Upgrade to the highest version available.
+ compatible_versions = sorted(compatible_versions, key=functools.cmp_to_key(compare_domain_versions))
+ # Return the highest compatible version which is lower than target_version
+ for v in reversed(compatible_versions):
+ if compare_domain_versions(v, target_version) <= 0:
+ return v
+ return None
+
+
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
+ if tags is None:
+ return False
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
+ changed = bool(tags_to_add or tags_to_remove)
+ if tags_to_add:
+ if module.check_mode:
+ module.exit_json(
+ changed=True, msg="Would have added tags to domain if not in check mode"
+ )
+ try:
+ client.add_tags(
+ ARN=resource_arn,
+ TagList=ansible_dict_to_boto3_tag_list(tags_to_add),
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e:
+ module.fail_json_aws(
+ e, "Couldn't add tags to domain {0}".format(resource_arn)
+ )
+ if tags_to_remove:
+ if module.check_mode:
+ module.exit_json(
+ changed=True, msg="Would have removed tags if not in check mode"
+ )
+ try:
+ client.remove_tags(ARN=resource_arn, TagKeys=tags_to_remove)
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e:
+ module.fail_json_aws(
+ e, "Couldn't remove tags from domain {0}".format(resource_arn)
+ )
+ return changed
diff --git a/ansible_collections/community/aws/plugins/module_utils/sns.py b/ansible_collections/community/aws/plugins/module_utils/sns.py
new file mode 100644
index 000000000..44327d493
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/sns.py
@@ -0,0 +1,170 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import copy
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+@AWSRetry.jittered_backoff()
+def _list_topics_with_backoff(client):
+ paginator = client.get_paginator('list_topics')
+ return paginator.paginate().build_full_result()['Topics']
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
+def _list_topic_subscriptions_with_backoff(client, topic_arn):
+ paginator = client.get_paginator('list_subscriptions_by_topic')
+ return paginator.paginate(TopicArn=topic_arn).build_full_result()['Subscriptions']
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
+def _list_subscriptions_with_backoff(client):
+ paginator = client.get_paginator('list_subscriptions')
+ return paginator.paginate().build_full_result()['Subscriptions']
+
+
+def list_topic_subscriptions(client, module, topic_arn):
+ try:
+ return _list_topic_subscriptions_with_backoff(client, topic_arn)
+ except is_boto3_error_code('AuthorizationError'):
+ try:
+ # potentially AuthorizationError when listing subscriptions for third party topic
+ return [sub for sub in _list_subscriptions_with_backoff(client)
+ if sub['TopicArn'] == topic_arn]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn)
+
+
+def list_topics(client, module):
+ try:
+ topics = _list_topics_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get topic list")
+ return [t['TopicArn'] for t in topics]
+
+
+def topic_arn_lookup(client, module, name):
+ # topic names cannot have colons, so this captures the full topic name
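+    # e.g. the name 'my-topic' matches 'arn:aws:sns:us-east-1:123456789012:my-topic'
+    # (hypothetical ARN, for illustration)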
+ all_topics = list_topics(client, module)
+ lookup_topic = ':%s' % name
+ for topic in all_topics:
+ if topic.endswith(lookup_topic):
+ return topic
+
+
+def compare_delivery_policies(policy_a, policy_b):
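+    # Returns True when the two delivery policies differ, once AWS's implicit
+    # 'disableSubscriptionOverrides' default has been applied to both sides.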
+ _policy_a = copy.deepcopy(policy_a)
+ _policy_b = copy.deepcopy(policy_b)
+ # AWS automatically injects disableSubscriptionOverrides if you set an
+ # http policy
+ if 'http' in policy_a:
+ if 'disableSubscriptionOverrides' not in policy_a['http']:
+ _policy_a['http']['disableSubscriptionOverrides'] = False
+ if 'http' in policy_b:
+ if 'disableSubscriptionOverrides' not in policy_b['http']:
+ _policy_b['http']['disableSubscriptionOverrides'] = False
+ comparison = (_policy_a != _policy_b)
+ return comparison
+
+
+def canonicalize_endpoint(protocol, endpoint):
+    # AWS SNS expects phone numbers in (and canonicalizes them to) E.164 format.
+    # See <https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html>
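+    # For example (illustrative): canonicalize_endpoint('sms', '+1 (555) 010-0000')
+    # returns '+15550100000'.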
+ if protocol == 'sms':
+ return re.sub('[^0-9+]*', '', endpoint)
+ return endpoint
+
+
+def get_tags(client, module, topic_arn):
+ try:
+ return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)['Tags'])
+ except is_boto3_error_code('AuthorizationError'):
+ module.warn("Permission denied accessing tags")
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain topic tags")
+
+
+def get_info(connection, module, topic_arn):
+ name = module.params.get('name')
+ topic_type = module.params.get('topic_type')
+ state = module.params.get('state')
+ subscriptions = module.params.get('subscriptions')
+ purge_subscriptions = module.params.get('purge_subscriptions')
+ content_based_deduplication = module.params.get('content_based_deduplication')
+ subscriptions_existing = module.params.get('subscriptions_existing', [])
+ subscriptions_deleted = module.params.get('subscriptions_deleted', [])
+    subscriptions_added = module.params.get('subscriptions_added', [])
+ topic_created = module.params.get('topic_created', False)
+ topic_deleted = module.params.get('topic_deleted', False)
+ attributes_set = module.params.get('attributes_set', [])
+ check_mode = module.check_mode
+
+ info = {
+ 'name': name,
+ 'topic_type': topic_type,
+ 'state': state,
+ 'subscriptions_new': subscriptions,
+ 'subscriptions_existing': subscriptions_existing,
+ 'subscriptions_deleted': subscriptions_deleted,
+ 'subscriptions_added': subscriptions_added,
+ 'subscriptions_purge': purge_subscriptions,
+ 'content_based_deduplication': content_based_deduplication,
+ 'check_mode': check_mode,
+ 'topic_created': topic_created,
+ 'topic_deleted': topic_deleted,
+ 'attributes_set': attributes_set,
+ }
+ if state != 'absent':
+ if topic_arn in list_topics(connection, module):
+ info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)['Attributes']))
+ info['delivery_policy'] = info.pop('effective_delivery_policy')
+ info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn)]
+ info["tags"] = get_tags(connection, module, topic_arn)
+ return info
+
+
+def update_tags(client, module, topic_arn):
+
+ if module.params.get('tags') is None:
+ return False
+
+ existing_tags = get_tags(client, module, topic_arn)
+ to_update, to_delete = compare_aws_tags(existing_tags, module.params['tags'], module.params['purge_tags'])
+
+ if not bool(to_delete or to_update):
+ return False
+
+ if module.check_mode:
+ return True
+
+ if to_update:
+ try:
+ client.tag_resource(ResourceArn=topic_arn,
+ Tags=ansible_dict_to_boto3_tag_list(to_update))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add tags to topic")
+ if to_delete:
+ try:
+ client.untag_resource(ResourceArn=topic_arn, TagKeys=to_delete)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove tags from topic")
+
+ return True
diff --git a/ansible_collections/community/aws/plugins/module_utils/transitgateway.py b/ansible_collections/community/aws/plugins/module_utils/transitgateway.py
new file mode 100644
index 000000000..3ec198abd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/transitgateway.py
@@ -0,0 +1,345 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.ec2 import BaseEc2Manager
+from ansible_collections.community.aws.plugins.module_utils.ec2 import Boto3Mixin
+from ansible_collections.community.aws.plugins.module_utils.ec2 import Ec2WaiterFactory
+
+
+class TgwWaiterFactory(Ec2WaiterFactory):
+ @property
+ def _waiter_model_data(self):
+ data = super(TgwWaiterFactory, self)._waiter_model_data
+        # Keep the TGW-specific waiters defined here, close to the code that uses them.
+ tgw_data = dict(
+ tgw_attachment_available=dict(
+ operation='DescribeTransitGatewayAttachments',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='success', matcher='pathAll', expected='available', argument='TransitGatewayAttachments[].State'),
+ ]
+ ),
+ tgw_attachment_deleted=dict(
+ operation='DescribeTransitGatewayAttachments',
+ delay=5, maxAttempts=120,
+ acceptors=[
+ dict(state='retry', matcher='pathAll', expected='deleting', argument='TransitGatewayAttachments[].State'),
+ dict(state='success', matcher='pathAll', expected='deleted', argument='TransitGatewayAttachments[].State'),
+ dict(state='success', matcher='path', expected=True, argument='length(TransitGatewayAttachments[]) == `0`'),
+ dict(state='success', matcher='error', expected='InvalidRouteTableID.NotFound'),
+ ]
+ ),
+ )
+ data.update(tgw_data)
+ return data
+
+
+class TGWAttachmentBoto3Mixin(Boto3Mixin):
+ def __init__(self, module, **kwargs):
+ self.tgw_waiter_factory = TgwWaiterFactory(module)
+ super(TGWAttachmentBoto3Mixin, self).__init__(module, **kwargs)
+
+    # Paginators can't be (easily) wrapped, so we wrap this method with the retry
+    # decorator - it retries the full fetch, but that's better than simply giving up.
+ @AWSRetry.jittered_backoff()
+ def _paginated_describe_transit_gateway_vpc_attachments(self, **params):
+ paginator = self.client.get_paginator('describe_transit_gateway_vpc_attachments')
+ return paginator.paginate(**params).build_full_result()
+
+ @Boto3Mixin.aws_error_handler('describe transit gateway attachments')
+ def _describe_vpc_attachments(self, **params):
+ result = self._paginated_describe_transit_gateway_vpc_attachments(**params)
+ return result.get('TransitGatewayVpcAttachments', None)
+
+ @Boto3Mixin.aws_error_handler('create transit gateway attachment')
+ def _create_vpc_attachment(self, **params):
+ result = self.client.create_transit_gateway_vpc_attachment(aws_retry=True, **params)
+ return result.get('TransitGatewayVpcAttachment', None)
+
+ @Boto3Mixin.aws_error_handler('modify transit gateway attachment')
+ def _modify_vpc_attachment(self, **params):
+ result = self.client.modify_transit_gateway_vpc_attachment(aws_retry=True, **params)
+ return result.get('TransitGatewayVpcAttachment', None)
+
+ @Boto3Mixin.aws_error_handler('delete transit gateway attachment')
+ def _delete_vpc_attachment(self, **params):
+ try:
+ result = self.client.delete_transit_gateway_vpc_attachment(aws_retry=True, **params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+ return result.get('TransitGatewayVpcAttachment', None)
+
+ @Boto3Mixin.aws_error_handler('transit gateway attachment to finish deleting')
+ def _wait_tgw_attachment_deleted(self, **params):
+ waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_deleted')
+ waiter.wait(**params)
+
+ @Boto3Mixin.aws_error_handler('transit gateway attachment to become available')
+ def _wait_tgw_attachment_available(self, **params):
+ waiter = self.tgw_waiter_factory.get_waiter('tgw_attachment_available')
+ waiter.wait(**params)
+
+ def _normalize_tgw_attachment(self, rtb):
+ return self._normalize_boto3_resource(rtb)
+
+ def _get_tgw_vpc_attachment(self, **params):
+        # Only for use with a single attachment; use _describe_vpc_attachments
+        # for multiple attachments.
+ attachments = self._describe_vpc_attachments(**params)
+
+ if not attachments:
+ return None
+
+ attachment = attachments[0]
+ return attachment
+
+
+class BaseTGWManager(BaseEc2Manager):
+
+ @Boto3Mixin.aws_error_handler('connect to AWS')
+ def _create_client(self, client_name='ec2'):
+ if client_name == 'ec2':
+ error_codes = ['IncorrectState']
+ else:
+ error_codes = []
+
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=error_codes,
+ )
+ client = self.module.client(client_name, retry_decorator=retry_decorator)
+ return client
+
+
+class TransitGatewayVpcAttachmentManager(TGWAttachmentBoto3Mixin, BaseTGWManager):
+
+ TAG_RESOURCE_TYPE = 'transit-gateway-attachment'
+
+ def __init__(self, module, id=None):
+ self._subnet_updates = dict()
+ super(TransitGatewayVpcAttachmentManager, self).__init__(module=module, id=id)
+
+ def _get_id_params(self, id=None, id_list=False):
+ if not id:
+ id = self.resource_id
+ if not id:
+            # Users should never see this, but let's cover ourselves
+ self.module.fail_json(msg='Attachment identifier parameter missing')
+
+ if id_list:
+ return dict(TransitGatewayAttachmentIds=[id])
+ return dict(TransitGatewayAttachmentId=id)
+
+ def _extra_error_output(self):
+ output = super(TransitGatewayVpcAttachmentManager, self)._extra_error_output()
+ if self.resource_id:
+ output['TransitGatewayAttachmentId'] = self.resource_id
+ return output
+
+ def _filter_immutable_resource_attributes(self, resource):
+ resource = super(TransitGatewayVpcAttachmentManager, self)._filter_immutable_resource_attributes(resource)
+ resource.pop('TransitGatewayId', None)
+ resource.pop('VpcId', None)
+ resource.pop('VpcOwnerId', None)
+ resource.pop('State', None)
+ resource.pop('SubnetIds', None)
+ resource.pop('CreationTime', None)
+ resource.pop('Tags', None)
+ return resource
+
+ def _set_option(self, name, value):
+ if value is None:
+ return False
+ # For now VPC Attachment options are all enable/disable
+ if value:
+ value = 'enable'
+ else:
+ value = 'disable'
+
+ options = deepcopy(self._preupdate_resource.get('Options', dict()))
+ options.update(self._resource_updates.get('Options', dict()))
+ options[name] = value
+
+ return self._set_resource_value('Options', options)
+
+ def set_dns_support(self, value):
+ return self._set_option('DnsSupport', value)
+
+ def set_ipv6_support(self, value):
+ return self._set_option('Ipv6Support', value)
+
+ def set_appliance_mode_support(self, value):
+ return self._set_option('ApplianceModeSupport', value)
+
+ def set_transit_gateway(self, tgw_id):
+ return self._set_resource_value('TransitGatewayId', tgw_id)
+
+ def set_vpc(self, vpc_id):
+ return self._set_resource_value('VpcId', vpc_id)
+
+ def set_subnets(self, subnets=None, purge=True):
+ if subnets is None:
+ return False
+
+ current_subnets = set(self._preupdate_resource.get('SubnetIds', []))
+ desired_subnets = set(subnets)
+ if not purge:
+ desired_subnets = desired_subnets.union(current_subnets)
+
+        # We'll pull the VPC ID from the subnets; no point asking for
+        # information we already 'know'.
+ subnet_details = self._describe_subnets(SubnetIds=list(desired_subnets))
+ vpc_id = self.subnets_to_vpc(desired_subnets, subnet_details)
+ self._set_resource_value('VpcId', vpc_id, immutable=True)
+
+ # Only one subnet per-AZ is permitted
+ azs = [s.get('AvailabilityZoneId') for s in subnet_details]
+ if len(azs) != len(set(azs)):
+ self.module.fail_json(
+ msg='Only one attachment subnet per availability zone may be set.',
+ availability_zones=azs, subnets=subnet_details)
+
+ subnets_to_add = list(desired_subnets.difference(current_subnets))
+ subnets_to_remove = list(current_subnets.difference(desired_subnets))
+ if not subnets_to_remove and not subnets_to_add:
+ return False
+ self._subnet_updates = dict(add=subnets_to_add, remove=subnets_to_remove)
+ self._set_resource_value('SubnetIds', list(desired_subnets))
+ return True
+
+ def subnets_to_vpc(self, subnets, subnet_details=None):
+ if not subnets:
+ return None
+
+ if subnet_details is None:
+ subnet_details = self._describe_subnets(SubnetIds=list(subnets))
+
+ vpcs = [s.get('VpcId') for s in subnet_details]
+ if len(set(vpcs)) > 1:
+ self.module.fail_json(
+ msg='Attachment subnets may only be in one VPC, multiple VPCs found',
+ vpcs=list(set(vpcs)), subnets=subnet_details)
+
+ return vpcs[0]
+
+ def _do_deletion_wait(self, id=None, **params):
+ all_params = self._get_id_params(id=id, id_list=True)
+ all_params.update(**params)
+ return self._wait_tgw_attachment_deleted(**all_params)
+
+ def _do_creation_wait(self, id=None, **params):
+ all_params = self._get_id_params(id=id, id_list=True)
+ all_params.update(**params)
+ return self._wait_tgw_attachment_available(**all_params)
+
+ def _do_update_wait(self, id=None, **params):
+ all_params = self._get_id_params(id=id, id_list=True)
+ all_params.update(**params)
+ return self._wait_tgw_attachment_available(**all_params)
+
+ def _do_create_resource(self):
+ params = self._merge_resource_changes(filter_immutable=False, creation=True)
+ response = self._create_vpc_attachment(**params)
+ if response:
+ self.resource_id = response.get('TransitGatewayAttachmentId', None)
+ return response
+
+ def _do_update_resource(self):
+ if self._preupdate_resource.get('State', None) == 'pending':
+ # Resources generally don't like it if you try to update before creation
+ # is complete. If things are in a 'pending' state they'll often throw
+ # exceptions.
+ self._wait_for_creation()
+ elif self._preupdate_resource.get('State', None) == 'deleting':
+ self.module.fail_json(msg='Deletion in progress, unable to update',
+ route_tables=[self.original_resource])
+
+ updates = self._filter_immutable_resource_attributes(self._resource_updates)
+ subnets_to_add = self._subnet_updates.get('add', [])
+ subnets_to_remove = self._subnet_updates.get('remove', [])
+ if subnets_to_add:
+ updates['AddSubnetIds'] = subnets_to_add
+ if subnets_to_remove:
+ updates['RemoveSubnetIds'] = subnets_to_remove
+
+ if not updates:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ updates.update(self._get_id_params(id_list=False))
+ self._modify_vpc_attachment(**updates)
+ return True
+
+ def get_resource(self):
+ return self.get_attachment()
+
+ def delete(self, id=None):
+
+ if id:
+ id_params = self._get_id_params(id=id, id_list=True)
+ result = self._get_tgw_vpc_attachment(**id_params)
+ else:
+ result = self._preupdate_resource
+
+ self.updated_resource = dict()
+
+ if not result:
+ return False
+
+ if result.get('State') == 'deleting':
+ self._wait_for_deletion()
+ return False
+
+ if self.module.check_mode:
+ self.changed = True
+ return True
+
+ id_params = self._get_id_params(id=id, id_list=False)
+
+ result = self._delete_vpc_attachment(**id_params)
+
+ self.changed |= bool(result)
+
+ self._wait_for_deletion()
+ return bool(result)
+
+ def list(self, filters=None, id=None):
+ params = dict()
+ if id:
+ params['TransitGatewayAttachmentIds'] = [id]
+ if filters:
+ params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+ attachments = self._describe_vpc_attachments(**params)
+ if not attachments:
+ return list()
+
+ return [self._normalize_tgw_attachment(a) for a in attachments]
+
+ def get_attachment(self, id=None):
+
+        # The describe call needs a list of IDs
+        id_params = self._get_id_params(id=id, id_list=True)
+        result = self._get_tgw_vpc_attachment(**id_params)
+
+ if not result:
+ return None
+
+ if not id:
+ self._preupdate_resource = deepcopy(result)
+
+ attachment = self._normalize_tgw_attachment(result)
+ return attachment
+
+ def _normalize_resource(self, resource):
+ return self._normalize_tgw_attachment(resource)
diff --git a/ansible_collections/community/aws/plugins/module_utils/wafv2.py b/ansible_collections/community/aws/plugins/module_utils/wafv2.py
new file mode 100644
index 000000000..18f19974d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/module_utils/wafv2.py
@@ -0,0 +1,206 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+@AWSRetry.jittered_backoff()
+def _list_tags(wafv2, arn, fail_json_aws, next_marker=None):
+ params = dict(ResourceARN=arn)
+ if next_marker:
+ params['NextMarker'] = next_marker
+ try:
+ return wafv2.list_tags_for_resource(**params)
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 tags")
+
+
+def describe_wafv2_tags(wafv2, arn, fail_json_aws):
+ next_marker = None
+ tag_list = []
+ # there is currently no paginator for wafv2
+ while True:
+        response = _list_tags(wafv2, arn, fail_json_aws, next_marker=next_marker)
+        next_marker = response.get('NextMarker', None)
+        tag_info = response.get('TagInfoForResource', {})
+ tag_list.extend(tag_info.get('TagList', []))
+ if not next_marker:
+ break
+ return boto3_tag_list_to_ansible_dict(tag_list)
+
+
+def ensure_wafv2_tags(wafv2, arn, tags, purge_tags, fail_json_aws, check_mode):
+ if tags is None:
+ return False
+
+ current_tags = describe_wafv2_tags(wafv2, arn, fail_json_aws)
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags)
+ if not tags_to_add and not tags_to_remove:
+ return False
+
+ if check_mode:
+ return True
+
+ if tags_to_add:
+ try:
+ boto3_tags = ansible_dict_to_boto3_tag_list(tags_to_add)
+ wafv2.tag_resource(ResourceARN=arn, Tags=boto3_tags)
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to add wafv2 tags")
+ if tags_to_remove:
+ try:
+ wafv2.untag_resource(ResourceARN=arn, TagKeys=tags_to_remove)
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to remove wafv2 tags")
+
+ return True
+
+
+def wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=None):
+ # there is currently no paginator for wafv2
+ req_obj = {
+ 'Scope': scope,
+ 'Limit': 100
+ }
+ if nextmarker:
+ req_obj['NextMarker'] = nextmarker
+
+ try:
+ response = wafv2.list_web_acls(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 web acl")
+
+ if response.get('NextMarker'):
+ response['WebACLs'] += wafv2_list_web_acls(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('WebACLs')
+ return response
+
+
+def wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=None):
+ # there is currently no paginator for wafv2
+ req_obj = {
+ 'Scope': scope,
+ 'Limit': 100
+ }
+ if nextmarker:
+ req_obj['NextMarker'] = nextmarker
+
+ try:
+ response = wafv2.list_rule_groups(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 rule group")
+
+ if response.get('NextMarker'):
+ response['RuleGroups'] += wafv2_list_rule_groups(wafv2, scope, fail_json_aws, nextmarker=response.get('NextMarker')).get('RuleGroups')
+ return response
+
+
+def wafv2_snake_dict_to_camel_dict(a):
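+    # Recursively rewrites dict keys so that 'Ip' and 'Arn' fragments use the
+    # capitalisation the WAFv2 API expects. For example (illustrative):
+    # {'IpSetReferenceStatement': {'Arn': 'x'}} -> {'IPSetReferenceStatement': {'ARN': 'x'}}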
+ if not isinstance(a, dict):
+ return a
+
+ retval = {}
+ for item in a.keys():
+ if isinstance(a.get(item), dict):
+ if 'Ip' in item:
+ retval[item.replace('Ip', 'IP')] = wafv2_snake_dict_to_camel_dict(a.get(item))
+ elif 'Arn' == item:
+ retval['ARN'] = wafv2_snake_dict_to_camel_dict(a.get(item))
+ else:
+ retval[item] = wafv2_snake_dict_to_camel_dict(a.get(item))
+ elif isinstance(a.get(item), list):
+ retval[item] = []
+ for idx in range(len(a.get(item))):
+ retval[item].append(wafv2_snake_dict_to_camel_dict(a.get(item)[idx]))
+ elif 'Ip' in item:
+ retval[item.replace('Ip', 'IP')] = a.get(item)
+ elif 'Arn' == item:
+ retval['ARN'] = a.get(item)
+ else:
+ retval[item] = a.get(item)
+ return retval
+
+
+def nested_byte_values_to_strings(rule, keyname):
+ """
+    Currently valid nested byte values in the statements array are:
+ - OrStatement
+ - AndStatement
+ - NotStatement
+ """
+ if rule.get('Statement', {}).get(keyname):
+ for idx in range(len(rule.get('Statement', {}).get(keyname, {}).get('Statements'))):
+ if rule['Statement'][keyname]['Statements'][idx].get('ByteMatchStatement'):
+ rule['Statement'][keyname]['Statements'][idx]['ByteMatchStatement']['SearchString'] = \
+ rule.get('Statement').get(keyname).get('Statements')[idx].get('ByteMatchStatement').get('SearchString').decode('utf-8')
+
+ return rule
+
+
+def byte_values_to_strings_before_compare(rules):
+ for idx in range(len(rules)):
+ if rules[idx].get('Statement', {}).get('ByteMatchStatement', {}).get('SearchString'):
+ rules[idx]['Statement']['ByteMatchStatement']['SearchString'] = \
+ rules[idx].get('Statement').get('ByteMatchStatement').get('SearchString').decode('utf-8')
+
+ else:
+ for statement in ['AndStatement', 'OrStatement', 'NotStatement']:
+ if rules[idx].get('Statement', {}).get(statement):
+ rules[idx] = nested_byte_values_to_strings(rules[idx], statement)
+
+ return rules
+
+
+def compare_priority_rules(existing_rules, requested_rules, purge_rules, state):
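+    # Compares existing and requested rules (matched on 'Priority') and returns a
+    # (diff, merged_rules) tuple: diff is True when an update is required, and
+    # merged_rules is the rule list to submit back to the API.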
+ diff = False
+ existing_rules = sorted(existing_rules, key=lambda k: k['Priority'])
+ existing_rules = byte_values_to_strings_before_compare(existing_rules)
+ requested_rules = sorted(requested_rules, key=lambda k: k['Priority'])
+
+ if purge_rules and state == 'present':
+ merged_rules = requested_rules
+ if len(existing_rules) == len(requested_rules):
+ for idx in range(len(existing_rules)):
+ if existing_rules[idx] != requested_rules[idx]:
+ diff = True
+ break
+ else:
+ diff = True
+
+ else:
+        # Find rules with matching priorities:
+        # * pop each matched rule from the existing rules
+        # * compare the existing rule with the requested rule
+ merged_rules = []
+ ex_idx_pop = []
+ for existing_idx in range(len(existing_rules)):
+ for requested_idx in range(len(requested_rules)):
+ if existing_rules[existing_idx].get('Priority') == requested_rules[requested_idx].get('Priority'):
+ if state == 'present':
+ ex_idx_pop.append(existing_idx)
+ if existing_rules[existing_idx] != requested_rules[requested_idx]:
+ diff = True
+ elif existing_rules[existing_idx] == requested_rules[requested_idx]:
+ ex_idx_pop.append(existing_idx)
+ diff = True
+
+        prev_count = len(existing_rules)
+        # Pop in reverse order so that the remaining indexes stay valid
+        for idx in sorted(ex_idx_pop, reverse=True):
+            existing_rules.pop(idx)
+
+ if state == 'present':
+ merged_rules = existing_rules + requested_rules
+
+ if len(merged_rules) != prev_count:
+ diff = True
+ else:
+ merged_rules = existing_rules
+
+ return diff, merged_rules
diff --git a/ansible_collections/community/aws/plugins/modules/__init__.py b/ansible_collections/community/aws/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/__init__.py
diff --git a/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py
new file mode 100644
index 000000000..e589d0cb0
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: accessanalyzer_validate_policy_info
+version_added: 5.0.0
+short_description: Performs validation of IAM policies
+description:
+ - Requests the validation of a policy and returns a list of findings.
+options:
+ policy:
+ description:
+ - A properly json formatted policy.
+ type: json
+ aliases: ['policy_document']
+ required: true
+ locale:
+ description:
+ - The locale to use for localizing the findings.
+ - Supported locales include C(DE), C(EN), C(ES), C(FR), C(IT), C(JA), C(KO), C(PT_BR),
+ C(ZH_CN) and C(ZH_TW).
+ - For more information about supported locales see the AWS Documentation
+ C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html)
+ type: str
+ required: false
+ default: 'EN'
+ policy_type:
+ description:
+ - The type of policy to validate.
+ - C(identity) policies grant permissions to IAM principals, including both managed and inline
+ policies for IAM roles, users, and groups.
+      - C(resource) policies grant permissions on AWS resources, including trust policies
+ for IAM roles and bucket policies for S3 buckets.
+ type: str
+ choices: ['identity', 'resource', 'service_control']
+ default: 'identity'
+ required: false
+ resource_type:
+ description:
+ - The type of resource to attach to your resource policy.
+ - Ignored unless I(policy_type=resource).
+ - Supported resource types include C(AWS::S3::Bucket), C(AWS::S3::AccessPoint),
+        C(AWS::S3::MultiRegionAccessPoint) and C(AWS::S3ObjectLambda::AccessPoint).
+ - For resource types not supported as valid values, IAM Access Analyzer runs policy checks
+ that apply to all resource policies.
+      - For more information about supported resource types see the AWS Documentation
+ C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html)
+ type: str
+ required: false
+ results_filter:
+ description:
+ - Filter the findings and limit them to specific finding types.
+ type: list
+ elements: str
+ choices: ['error', 'security', 'suggestion', 'warning']
+ required: false
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Validate a policy
+- name: Validate a simple IAM policy
+ community.aws.accessanalyzer_validate_policy_info:
+ policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
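+
+# The returned findings can be limited to specific severities via results_filter
+- name: Validate a policy, returning only errors and security warnings
+  community.aws.accessanalyzer_validate_policy_info:
+    policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
+    results_filter:
+      - error
+      - security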
+'''
+
+RETURN = r'''
+findings:
+ description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ finding_details:
+ description:
+ - A localized message describing the finding.
+ type: str
+ returned: success
+ sample: 'Resource ARN does not match the expected ARN format. Update the resource portion of the ARN.'
+ finding_type:
+ description:
+ - The severity of the finding.
+ type: str
+ returned: success
+ sample: 'ERROR'
+ issue_code:
+ description:
+ - An identifier for the type of issue found.
+ type: str
+ returned: success
+ sample: 'INVALID_ARN_RESOURCE'
+ learn_more_link:
+ description:
+ - A link to additional information about the finding type.
+ type: str
+ returned: success
+ sample: 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-policy-checks.html'
+ locations:
+ description:
+ - The location of the item resulting in the recommendations.
+ type: list
+ returned: success
+ elements: dict
+ contains:
+ path:
+ description: A path in a policy, represented as a sequence of path elements.
+ type: list
+ elements: dict
+ returned: success
+ sample: [{"value": "Statement"}, {"index": 0}, {"value": "Resource"}, {"index": 0}]
+ span:
+ description:
+ - Where in the policy the finding refers to.
+ - Note - when using lookups or passing dictionaries to I(policy) the policy string may be
+        converted to a single line of JSON, changing the column, line and offset values.
+ type: dict
+ contains:
+ start:
+ description: The start position of the span.
+ type: dict
+ returned: success
+ contains:
+ column:
+ description: The column of the position, starting from C(0).
+ type: int
+ returned: success
+ line:
+ description: The line of the position, starting from C(1).
+ type: int
+ returned: success
+ offset:
+ description: The offset within the policy that corresponds to the position, starting from C(0).
+ type: int
+ returned: success
+ end:
+ description: The end position of the span.
+ type: dict
+ returned: success
+ contains:
+ column:
+ description: The column of the position, starting from C(0).
+ type: int
+ returned: success
+ line:
+ description: The line of the position, starting from C(1).
+ type: int
+ returned: success
+ offset:
+ description: The offset within the policy that corresponds to the position, starting from C(0).
+ type: int
+ returned: success
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def filter_findings(findings, type_filter):
+ if not type_filter:
+ return findings
+
+ # Convert type_filter to the findingType strings returned by the API
+ filter_map = dict(error='ERROR', security='SECURITY_WARNING',
+ suggestion='SUGGESTION', warning='WARNING')
+ allowed_types = [filter_map[t] for t in type_filter]
+
+ filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types]
+ return filtered_results
+
+
+def main():
+    # Botocore only supports specific values for locale and resource_type; however, the supported
+    # values are likely to be expanded. Let's avoid hard-coding limits which might not hold true in
+    # the long term...
+ argument_spec = dict(
+ policy=dict(required=True, type='json', aliases=['policy_document']),
+ locale=dict(required=False, type='str', default='EN'),
+ policy_type=dict(required=False, type='str', default='identity',
+ choices=['identity', 'resource', 'service_control']),
+ resource_type=dict(required=False, type='str'),
+ results_filter=dict(required=False, type='list', elements='str',
+ choices=['error', 'security', 'suggestion', 'warning']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY',
+ service_control='SERVICE_CONTROL_POLICY')
+
+ policy = module.params.get('policy')
+ policy_type = policy_type_map[module.params.get('policy_type')]
+ locale = module.params.get('locale').upper()
+ resource_type = module.params.get('resource_type')
+ results_filter = module.params.get('results_filter')
+
+ try:
+ client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ params = dict(locale=locale, policyDocument=policy, policyType=policy_type)
+ if policy_type == 'RESOURCE_POLICY' and resource_type:
+        params['validatePolicyResourceType'] = resource_type
+
+ results = client.validate_policy(aws_retry=True, **params)
+
+ findings = filter_findings(results.get('findings', []), results_filter)
+ results['findings'] = findings
+
+ results = camel_dict_to_snake_dict(results)
+
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate.py b/ansible_collections/community/aws/plugins/modules/acm_certificate.py
new file mode 100644
index 000000000..abdecadcc
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/acm_certificate.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
+# on behalf of Telstra Corporation Limited
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: acm_certificate
+short_description: Upload and delete certificates in the AWS Certificate Manager service
+version_added: 1.0.0
+description:
+ - >
+ Import and delete certificates in Amazon Web Service's Certificate
+ Manager (AWS ACM).
+ - >
+ This module does not currently interact with AWS-provided certificates.
+ It currently only manages certificates provided to AWS by the user.
+ - The ACM API allows users to upload multiple certificates for the same domain
+ name, and even multiple identical certificates. This module attempts to
+ restrict such freedoms, to be idempotent, as per the Ansible philosophy.
+ It does this through applying AWS resource "Name" tags to ACM certificates.
+ - >
+ When I(state=present),
+ if there is one certificate in ACM
+ with a C(Name) tag equal to the I(name_tag) parameter,
+ and an identical body and chain,
+ this task will succeed without effect.
+ - >
+ When I(state=present),
+ if there is one certificate in ACM
+    with a I(Name) tag equal to the I(name_tag) parameter,
+ and a different body,
+ this task will overwrite that certificate.
+ - >
+ When I(state=present),
+ if there are multiple certificates in ACM
+ with a I(Name) tag equal to the I(name_tag) parameter,
+ this task will fail.
+ - >
+ When I(state=absent) and I(certificate_arn) is defined,
+ this module will delete the ACM resource with that ARN if it exists in this
+ region, and succeed without effect if it doesn't exist.
+ - >
+ When I(state=absent) and I(domain_name) is defined, this module will delete
+ all ACM resources in this AWS region with a corresponding domain name.
+ If there are none, it will succeed without effect.
+ - >
+ When I(state=absent) and I(certificate_arn) is not defined,
+ and I(domain_name) is not defined, this module will delete all ACM resources
+ in this AWS region with a corresponding I(Name) tag.
+ If there are none, it will succeed without effect.
+ - >
+ Note that this may not work properly with keys of size 4096 bits, due to a
+ limitation of the ACM API.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_acm).
+ The usage did not change.
+options:
+ certificate:
+ description:
+ - The body of the PEM encoded public certificate.
+ - Required when I(state) is not C(absent) and the certificate does not exist.
+ - >
+ If your certificate is in a file,
+ use C(lookup('file', 'path/to/cert.pem')).
+ type: str
+ certificate_arn:
+ description:
+ - The ARN of a certificate in ACM to modify or delete.
+ - >
+ If I(state=present), the certificate with the specified ARN can be updated.
+ For example, this can be used to add/remove tags to an existing certificate.
+ - >
+ If I(state=absent), you must provide one of
+ I(certificate_arn), I(domain_name) or I(name_tag).
+ - >
+ If I(state=absent) and no resource exists with this ARN in this region,
+ the task will succeed with no effect.
+ - >
+ If I(state=absent) and the corresponding resource exists in a different
+ region, this task may report success without deleting that resource.
+ type: str
+ aliases: [arn]
+ certificate_chain:
+ description:
+ - The body of the PEM encoded chain for your certificate.
+ - >
+ If your certificate chain is in a file,
+ use C(lookup('file', 'path/to/chain.pem')).
+      - Ignored when I(state=absent).
+ type: str
+ domain_name:
+ description:
+ - The domain name of the certificate.
+ - >
+ If I(state=absent) and I(domain_name) is specified,
+ this task will delete all ACM certificates with this domain.
+ - >
+ Exactly one of I(domain_name), I(name_tag) and I(certificate_arn)
+ must be provided.
+ - >
+ If I(state=present) this must not be specified.
+ (Since the domain name is encoded within the public certificate's body.)
+ type: str
+ aliases: [domain]
+ name_tag:
+ description:
+ - >
+ The unique identifier for tagging resources using AWS tags,
+ with key I(Name).
+ - This can be any set of characters accepted by AWS for tag values.
+ - >
+ This is to ensure Ansible can treat certificates idempotently,
+ even though the ACM API allows duplicate certificates.
+      - If I(state=present), this must be specified.
+ - >
+ If I(state=absent) and I(name_tag) is specified,
+ this task will delete all ACM certificates with this Name tag.
+ - >
+ If I(state=absent), you must provide exactly one of
+ I(certificate_arn), I(domain_name) or I(name_tag).
+ - >
+ If both I(name_tag) and the 'Name' tag in I(tags) are set,
+ the values must be the same.
+ - >
+ If the 'Name' tag in I(tags) is not set and I(name_tag) is set,
+ the I(name_tag) value is copied to I(tags).
+ type: str
+ aliases: [name]
+ private_key:
+ description:
+ - The body of the PEM encoded private key.
+ - Required when I(state=present) and the certificate does not exist.
+ - Ignored when I(state=absent).
+ - >
+ If your private key is in a file,
+ use C(lookup('file', 'path/to/key.pem')).
+ type: str
+ state:
+ description:
+ - >
+ If I(state=present), the specified public certificate and private key
+ will be uploaded, with I(Name) tag equal to I(name_tag).
+ - >
+ If I(state=absent), any certificates in this region
+ with a corresponding I(domain_name), I(name_tag) or I(certificate_arn)
+ will be deleted.
+ choices: [present, absent]
+ default: present
+ type: str
+
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 3.2.0
+author:
+ - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+
+- name: upload a self-signed certificate
+ community.aws.aws_acm:
+ certificate: "{{ lookup('file', 'cert.pem' ) }}"
+    private_key: "{{ lookup('file', 'key.pem' ) }}"
+ name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert"
+ region: ap-southeast-2 # AWS region
+
+- name: create/update a certificate with a chain
+ community.aws.aws_acm:
+ certificate: "{{ lookup('file', 'cert.pem' ) }}"
+ private_key: "{{ lookup('file', 'key.pem' ) }}"
+ name_tag: my_cert
+ certificate_chain: "{{ lookup('file', 'chain.pem' ) }}"
+ state: present
+ region: ap-southeast-2
+  register: cert_create
+
+- name: print ARN of cert we just created
+ ansible.builtin.debug:
+ var: cert_create.certificate.arn
+
+- name: delete the cert we just created
+ community.aws.aws_acm:
+ name_tag: my_cert
+ state: absent
+ region: ap-southeast-2
+
+- name: delete a certificate with a particular ARN
+ community.aws.aws_acm:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+ state: absent
+ region: ap-southeast-2
+
+- name: delete all certificates with a particular domain name
+ community.aws.aws_acm:
+ domain_name: acm.ansible.com
+ state: absent
+ region: ap-southeast-2
+
+- name: add tags to an existing certificate with a particular ARN
+ community.aws.aws_acm:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+ tags:
+ Name: my_certificate
+ Application: search
+ Environment: development
+ purge_tags: true
+'''
+
+RETURN = '''
+certificate:
+ description: Information about the certificate which was uploaded
+ type: complex
+ returned: when I(state=present)
+ contains:
+ arn:
+ description: The ARN of the certificate in ACM
+ type: str
+ returned: when I(state=present) and not in check mode
+ sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+ domain_name:
+ description: The domain name encoded within the public certificate
+ type: str
+ returned: when I(state=present)
+ sample: acm.ansible.com
+arns:
+ description: A list of the ARNs of the certificates in ACM which were deleted
+ type: list
+ elements: str
+ returned: when I(state=absent)
+ sample:
+ - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+'''
+
+
+import base64
+from copy import deepcopy
+import re # regex library
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_tag_list,
+)
+from ansible.module_utils._text import to_text
+
+
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
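+    # Reconcile the certificate's tags with the requested set and return
+    # (changed, resulting_tags); tags=None means "leave the tags untouched".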
+ if tags is None:
+ return (False, existing_tags)
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
+ changed = bool(tags_to_add or tags_to_remove)
+ if tags_to_add and not module.check_mode:
+ try:
+ client.add_tags_to_certificate(
+ CertificateArn=resource_arn,
+ Tags=ansible_dict_to_boto3_tag_list(tags_to_add),
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e:
+ module.fail_json_aws(
+ e, "Couldn't add tags to certificate {0}".format(resource_arn)
+ )
+ if tags_to_remove and not module.check_mode:
+ # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys.
+ tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove]
+ try:
+ client.remove_tags_from_certificate(
+ CertificateArn=resource_arn,
+ Tags=tags_list,
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e:
+ module.fail_json_aws(
+ e, "Couldn't remove tags from certificate {0}".format(resource_arn)
+ )
+ new_tags = deepcopy(existing_tags)
+ for key, value in tags_to_add.items():
+ new_tags[key] = value
+ for key in tags_to_remove:
+ new_tags.pop(key, None)
+ return (changed, new_tags)
+
+
+# Takes in two text arguments
+# Each a PEM encoded certificate
+# Or a chain of PEM encoded certificates
+# May include some non-PEM lines between each cert in the chain, e.g. "Subject: ..."
+# Returns True iff the chains/certs are functionally identical (including chain order)
+def chain_compare(module, a, b):
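+    # Hypothetical usage sketch: with two PEM strings read from disk,
+    #   chain_compare(module, open('a.pem').read(), open('b.pem').read())
+    # returns True only when both chains have the same length and each
+    # position decodes to identical DER bytes.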
+
+ chain_a_pem = pem_chain_split(module, a)
+ chain_b_pem = pem_chain_split(module, b)
+
+ if len(chain_a_pem) != len(chain_b_pem):
+ return False
+
+ # Chain length is the same
+ for (ca, cb) in zip(chain_a_pem, chain_b_pem):
+ der_a = PEM_body_to_DER(module, ca)
+ der_b = PEM_body_to_DER(module, cb)
+ if der_a != der_b:
+ return False
+
+ return True
+
+
+# Takes in PEM encoded data with no headers
+# returns the equivalent DER as a byte array
+def PEM_body_to_DER(module, pem):
+ try:
+ der = base64.b64decode(to_text(pem))
+ except (ValueError, TypeError) as e:
+ module.fail_json_aws(e, msg="Unable to decode certificate chain")
+ return der
+
+
+# Store this globally to avoid repeated recompilation
+pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")
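+# The pattern tolerates five or six dashes and qualified headers such as
+# "-----BEGIN TRUSTED CERTIFICATE-----", capturing only the base64 body
+# between the BEGIN and END markers.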
+
+
+# Use regex to split up a chain or single cert into an array of base64 encoded data
+# Using "-----BEGIN CERTIFICATE-----" and "----END CERTIFICATE----"
+# Noting that some chains have non-pem data in between each cert
+# This function returns only what's between the headers, excluding the headers
+def pem_chain_split(module, pem):
+
+ pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
+
+ if len(pem_arr) == 0:
+ # This happens if the regex doesn't match at all
+ module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
+
+ return pem_arr
+
+
+def update_imported_certificate(client, module, acm, old_cert, desired_tags):
+ """
+ Update the existing certificate that was previously imported in ACM.
+ """
+ module.debug("Existing certificate found in ACM")
+ if ('tags' not in old_cert) or ('Name' not in old_cert['tags']):
+ # shouldn't happen
+ module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
+ if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')):
+ # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name',
+ # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'.
+ module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert)
+ if 'certificate' not in old_cert:
+ # shouldn't happen
+ module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)
+
+ cert_arn = None
+ # Are the existing certificate in ACM and the local certificate the same?
+ same = True
+ if module.params.get('certificate') is not None:
+ same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
+ if module.params['certificate_chain']:
+ # Need to test this
+ # not sure if Amazon appends the cert itself to the chain when self-signed
+ same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
+ else:
+ # When there is no chain with a cert
+ # it seems Amazon returns the cert itself as the chain
+ same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])
+
+ if same:
+ module.debug("Existing certificate in ACM is the same")
+ cert_arn = old_cert['certificate_arn']
+ changed = False
+ else:
+ absent_args = ['certificate', 'name_tag', 'private_key']
+ if sum([(module.params[a] is not None) for a in absent_args]) < 3:
+ module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified")
+ module.debug("Existing certificate in ACM is different, overwriting")
+ changed = True
+ if module.check_mode:
+ cert_arn = old_cert['certificate_arn']
+ # note: returned domain will be the domain of the previous cert
+ else:
+ # update cert in ACM
+ cert_arn = acm.import_certificate(
+ client,
+ module,
+ certificate=module.params['certificate'],
+ private_key=module.params['private_key'],
+ certificate_chain=module.params['certificate_chain'],
+ arn=old_cert['certificate_arn'],
+ tags=desired_tags,
+ )
+ return (changed, cert_arn)
+
+
+def import_certificate(client, module, acm, desired_tags):
+ """
+ Import a certificate to ACM.
+ """
+ # Validate argument requirements
+ absent_args = ['certificate', 'name_tag', 'private_key']
+ cert_arn = None
+ if sum([(module.params[a] is not None) for a in absent_args]) < 3:
+ module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified")
+ module.debug("No certificate in ACM. Creating new one.")
+ changed = True
+ if module.check_mode:
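+        # In check mode nothing is imported, so report a placeholder domain.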
+ domain = 'example.com'
+ module.exit_json(certificate=dict(domain_name=domain), changed=True)
+ else:
+ cert_arn = acm.import_certificate(
+ client,
+ module,
+ certificate=module.params['certificate'],
+ private_key=module.params['private_key'],
+ certificate_chain=module.params['certificate_chain'],
+ tags=desired_tags,
+ )
+ return (changed, cert_arn)
+
+
+def ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags):
+ cert_arn = None
+ changed = False
+ if len(certificates) > 1:
+ msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
+ module.fail_json(msg=msg, certificates=certificates)
+ elif len(certificates) == 1:
+ # Update existing certificate that was previously imported to ACM.
+ (changed, cert_arn) = update_imported_certificate(client, module, acm, certificates[0], desired_tags)
+ else: # len(certificates) == 0
+ # Import new certificate to ACM.
+ (changed, cert_arn) = import_certificate(client, module, acm, desired_tags)
+
+ # Add/remove tags to/from certificate
+ try:
+ existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for certificate")
+
+ purge_tags = module.params.get('purge_tags')
+ (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags)
+ changed |= c
+ domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn)
+ module.exit_json(certificate=dict(domain_name=domain, arn=cert_arn, tags=new_tags), changed=changed)
+
+
+def ensure_certificates_absent(client, module, acm, certificates):
+ for cert in certificates:
+ if not module.check_mode:
+ acm.delete_certificate(client, module, cert['certificate_arn'])
+ module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0))
+
+
+def main():
+ argument_spec = dict(
+ certificate=dict(),
+ certificate_arn=dict(aliases=['arn']),
+ certificate_chain=dict(),
+ domain_name=dict(aliases=['domain']),
+ name_tag=dict(aliases=['name']),
+ private_key=dict(no_log=True),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ acm = ACMServiceManager(module)
+
+ # Check argument requirements
+ if module.params['state'] == 'present':
+ # at least one of these should be specified.
+ absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+ if sum([(module.params[a] is not None) for a in absent_args]) < 1:
+ for a in absent_args:
+ module.debug("%s is %s" % (a, module.params[a]))
+ module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+ else: # absent
+ # exactly one of these should be specified
+ absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+ if sum([(module.params[a] is not None) for a in absent_args]) != 1:
+ for a in absent_args:
+ module.debug("%s is %s" % (a, module.params[a]))
+ module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+
+ filter_tags = None
+ desired_tags = None
+ if module.params.get('tags') is not None:
+ desired_tags = module.params['tags']
+ else:
+ # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed
+ module.params['purge_tags'] = False
+ if module.params.get('name_tag') is not None:
+ # The module was originally implemented to filter certificates based on the 'Name' tag.
+ # Other tags are not used to filter certificates.
+ # It would make sense to replace the existing name_tag, domain, certificate_arn attributes
+ # with a 'filter' attribute, but that would break backwards-compatibility.
+ filter_tags = dict(Name=module.params['name_tag'])
+ if desired_tags is not None:
+ if 'Name' in desired_tags:
+ if desired_tags['Name'] != module.params['name_tag']:
+ module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'")
+ else:
+ desired_tags['Name'] = module.params['name_tag']
+ else:
+ desired_tags = deepcopy(filter_tags)
+
+ client = module.client('acm')
+
+ # fetch the list of certificates currently in ACM
+ certificates = acm.get_certificates(
+ client=client,
+ module=module,
+ domain_name=module.params['domain_name'],
+ arn=module.params['certificate_arn'],
+ only_tags=filter_tags,
+ )
+
+ module.debug("Found %d corresponding certificates in ACM" % len(certificates))
+ if module.params['state'] == 'present':
+ ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags)
+
+ else: # state == absent
+ ensure_certificates_absent(client, module, acm, certificates)
+
+
+if __name__ == '__main__':
+ # tests()
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py
new file mode 100644
index 000000000..a84d7c0b0
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: acm_certificate_info
+short_description: Retrieve certificate information from AWS Certificate Manager service
+version_added: 1.0.0
+description:
+ - Retrieve information for ACM certificates.
+ - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_acm_info).
+ The usage did not change.
+options:
+ certificate_arn:
+ description:
+ - If provided, the results will be filtered to show only the certificate with this ARN.
+ - If no certificate with this ARN exists, this task will fail.
+ - If a certificate with this ARN exists in a different region, this task will fail.
+ aliases:
+ - arn
+ type: str
+ domain_name:
+ description:
+ - The domain name of an ACM certificate to limit the search to.
+ aliases:
+ - name
+ type: str
+ statuses:
+ description:
+ - Status to filter the certificate results.
+ choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ type: list
+ elements: str
+ tags:
+ description:
+ - Filter results to show only certificates with tags that match all the tags specified here.
+ type: dict
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: obtain all ACM certificates
+ community.aws.aws_acm_info:
+
+- name: obtain all information for a single ACM certificate
+ community.aws.aws_acm_info:
+ domain_name: "*.example_com"
+
+- name: obtain all certificates pending validation
+ community.aws.aws_acm_info:
+ statuses:
+ - PENDING_VALIDATION
+
+- name: obtain all certificates with tag Name=foo and myTag=bar
+ community.aws.aws_acm_info:
+ tags:
+ Name: foo
+ myTag: bar
+
+
+# The output is still a list of certificates, just one item long.
+- name: obtain information about a certificate with a particular ARN
+ community.aws.aws_acm_info:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+
+'''
+
+RETURN = r'''
+certificates:
+ description: A list of certificates
+ returned: always
+ type: complex
+ contains:
+ certificate:
+ description: The ACM Certificate body
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
+ type: str
+ certificate_arn:
+ description: Certificate ARN
+ returned: always
+ sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
+ type: str
+ certificate_chain:
+ description: Full certificate chain for the certificate
+ returned: when certificate creation is complete
+ sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
+ type: str
+ created_at:
+ description: Date certificate was created
+ returned: always
+ sample: '2017-08-15T10:31:19+10:00'
+ type: str
+ domain_name:
+ description: Domain name for the certificate
+ returned: always
+ sample: '*.example.com'
+ type: str
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ failure_reason:
+ description: Reason certificate request failed
+ returned: only when certificate issuing failed
+ type: str
+ sample: NO_AVAILABLE_CONTACTS
+ in_use_by:
+ description: A list of ARNs for the AWS resources that are using the certificate.
+ returned: always
+ sample: []
+ type: list
+ elements: str
+ issued_at:
+ description: Date certificate was issued
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ issuer:
+ description: Issuer of the certificate
+ returned: always
+ sample: Amazon
+ type: str
+ key_algorithm:
+ description: Algorithm used to generate the certificate
+ returned: always
+ sample: RSA-2048
+ type: str
+ not_after:
+ description: Date after which the certificate is not valid
+ returned: always
+ sample: '2019-01-01T00:00:00+10:00'
+ type: str
+ not_before:
+ description: Date before which the certificate is not valid
+ returned: always
+ sample: '2017-01-01T00:00:00+10:00'
+ type: str
+ renewal_summary:
+ description: Information about managed renewal process
+ returned: when certificate is issued by Amazon and a renewal has been started
+ type: complex
+ contains:
+ domain_validation_options:
+ description: Options used by ACM to validate the certificate
+ returned: when certificate type is AMAZON_ISSUED
+ type: complex
+ contains:
+ domain_name:
+ description: Fully qualified domain name of the certificate
+ returned: always
+ sample: example.com
+ type: str
+ validation_domain:
+ description: The domain name ACM used to send validation emails
+ returned: always
+ sample: example.com
+ type: str
+ validation_emails:
+ description: A list of email addresses that ACM used to send domain validation emails
+ returned: always
+ sample:
+ - admin@example.com
+ - postmaster@example.com
+ type: list
+ elements: str
+ validation_status:
+ description: Validation status of the domain
+ returned: always
+ sample: SUCCESS
+ type: str
+ renewal_status:
+ description: Status of the domain renewal
+ returned: always
+ sample: PENDING_AUTO_RENEWAL
+ type: str
+ revocation_reason:
+ description: Reason for certificate revocation
+ returned: when the certificate has been revoked
+ sample: SUPERCEDED
+ type: str
+ revoked_at:
+ description: Date certificate was revoked
+ returned: when the certificate has been revoked
+ sample: '2017-09-01T10:00:00+10:00'
+ type: str
+ serial:
+ description: The serial number of the certificate
+ returned: always
+ sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+ type: str
+ signature_algorithm:
+ description: Algorithm used to sign the certificate
+ returned: always
+ sample: SHA256WITHRSA
+ type: str
+ status:
+ description: Status of the certificate in ACM
+ returned: always
+ sample: ISSUED
+ type: str
+ subject:
+ description: The name of the entity that is associated with the public key contained in the certificate
+ returned: always
+ sample: CN=*.example.com
+ type: str
+ subject_alternative_names:
+ description: Subject Alternative Names for the certificate
+ returned: always
+ sample:
+ - '*.example.com'
+ type: list
+ elements: str
+ tags:
+ description: Tags associated with the certificate
+ returned: always
+ type: dict
+ sample:
+ Application: helloworld
+ Environment: test
+ type:
+ description: The source of the certificate
+ returned: always
+ sample: AMAZON_ISSUED
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+
+
+def main():
+ argument_spec = dict(
+ certificate_arn=dict(aliases=['arn']),
+ domain_name=dict(aliases=['name']),
+ statuses=dict(
+ type='list',
+ elements='str',
+ choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ ),
+ tags=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ acm_info = ACMServiceManager(module)
+
+ client = module.client('acm')
+
+ certificates = acm_info.get_certificates(client, module,
+ domain_name=module.params['domain_name'],
+ statuses=module.params['statuses'],
+ arn=module.params['certificate_arn'],
+ only_tags=module.params['tags'])
+
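+    # When filtering by ARN, get_certificates should return exactly one
+    # entry; anything else means the ARN does not exist in this region.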
+ if module.params['certificate_arn'] and len(certificates) != 1:
+ module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
+
+ module.exit_json(certificates=certificates)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway.py b/ansible_collections/community/aws/plugins/modules/api_gateway.py
new file mode 100644
index 000000000..a084bf93e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/api_gateway.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: api_gateway
+version_added: 1.0.0
+short_description: Manage AWS API Gateway APIs
+description:
+ - Allows for the management of API Gateway APIs.
+  - Normally you should give the I(api_id) since there is no other
+    stable guaranteed unique identifier for the API. If you do
+    not give I(api_id) then a new API will be created each time
+    this is run.
+  - I(swagger_file) and I(swagger_text) are passed directly on to AWS
+    transparently whilst I(swagger_dict) is an Ansible dict which is
+    converted to JSON before the API definitions are uploaded.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway).
+ The usage did not change.
+options:
+ api_id:
+ description:
+ - The ID of the API you want to manage.
+ type: str
+ state:
+ description: Create or delete API Gateway.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ swagger_file:
+ description:
+ - JSON or YAML file containing swagger definitions for API.
+ Exactly one of I(swagger_file), I(swagger_text) or I(swagger_dict) must
+ be present.
+ type: path
+ aliases: ['src', 'api_file']
+ swagger_text:
+ description:
+ - Swagger definitions for API in JSON or YAML as a string direct
+ from playbook.
+ type: str
+ swagger_dict:
+ description:
+ - Swagger definitions API ansible dictionary which will be
+ converted to JSON and uploaded.
+ type: json
+ stage:
+ description:
+ - The name of the stage the API should be deployed to.
+ type: str
+ deploy_desc:
+ description:
+ - Description of the deployment.
+ - Recorded and visible in the AWS console.
+ default: Automatic deployment by Ansible.
+ type: str
+ cache_enabled:
+ description:
+ - Enable API GW caching of backend responses.
+ type: bool
+ default: false
+ cache_size:
+ description:
+      - Size in GB of the API GW cache, becomes effective when I(cache_enabled) is C(true).
+ choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']
+ type: str
+ default: '0.5'
+ stage_variables:
+ description:
+ - ENV variables for the stage. Define a dict of key values pairs for variables.
+ type: dict
+ default: {}
+ stage_canary_settings:
+ description:
+ - Canary settings for the deployment of the stage.
+ - 'Dict with following settings:'
+ - 'C(percentTraffic): The percent (0-100) of traffic diverted to a canary deployment.'
+ - 'C(deploymentId): The ID of the canary deployment.'
+ - 'C(stageVariableOverrides): Stage variables overridden for a canary release deployment.'
+ - 'C(useStageCache): A Boolean flag to indicate whether the canary deployment uses the stage cache or not.'
+ - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage)
+ type: dict
+ default: {}
+ tracing_enabled:
+ description:
+ - Specifies whether active tracing with X-ray is enabled for the API GW stage.
+ type: bool
+ default: false
+ endpoint_type:
+ description:
+ - Type of endpoint configuration.
+ - Use C(EDGE) for an edge optimized API endpoint,
+ C(REGIONAL) for just a regional deploy or C(PRIVATE) for a private API.
+ - This flag will only be used when creating a new API Gateway setup, not for updates.
+ choices: ['EDGE', 'REGIONAL', 'PRIVATE']
+ type: str
+ default: EDGE
+author:
+ - 'Michael De La Rue (@mikedlr)'
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+notes:
+ - A future version of this module will probably use tags or another
+ ID so that an API can be created only once.
+ - As an early work around an intermediate version will probably do
+ the same using a tag embedded in the API name.
+'''
+
+EXAMPLES = '''
+- name: Setup AWS API Gateway setup on AWS and deploy API definition
+ community.aws.api_gateway:
+ swagger_file: my_api.yml
+ stage: production
+ cache_enabled: true
+ cache_size: '1.6'
+ tracing_enabled: true
+ endpoint_type: EDGE
+ state: present
+
+- name: Update API definition to deploy new version
+ community.aws.api_gateway:
+ api_id: 'abc123321cba'
+ swagger_file: my_api.yml
+ deploy_desc: Make auth fix available.
+ cache_enabled: true
+ cache_size: '1.6'
+ endpoint_type: EDGE
+ state: present
+
+- name: Update API definitions and settings and deploy as canary
+ community.aws.api_gateway:
+ api_id: 'abc123321cba'
+ swagger_file: my_api.yml
+ cache_enabled: true
+ cache_size: '6.1'
+    stage_canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: true }
+ state: present
+'''
+
+RETURN = '''
+api_id:
+ description: API id of the API endpoint created
+ returned: success
+ type: str
+ sample: '0ln4zq7p86'
+configure_response:
+ description: AWS response from the API configure call
+ returned: success
+ type: dict
+ sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" }
+deploy_response:
+ description: AWS response from the API deploy call
+ returned: success
+ type: dict
+ sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." }
+resource_actions:
+ description: Actions performed against AWS API
+ returned: always
+ type: list
+ sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
+'''
+
+import json
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def main():
+ argument_spec = dict(
+ api_id=dict(type='str', required=False),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
+ swagger_dict=dict(type='json', default=None),
+ swagger_text=dict(type='str', default=None),
+ stage=dict(type='str', default=None),
+ deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
+ cache_enabled=dict(type='bool', default=False),
+ cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
+ stage_variables=dict(type='dict', default={}),
+ stage_canary_settings=dict(type='dict', default={}),
+ tracing_enabled=dict(type='bool', default=False),
+ endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
+ )
+
+    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ )
+
+ api_id = module.params.get('api_id')
+    state = module.params.get('state')
+ swagger_file = module.params.get('swagger_file')
+ swagger_dict = module.params.get('swagger_dict')
+ swagger_text = module.params.get('swagger_text')
+ endpoint_type = module.params.get('endpoint_type')
+
+ client = module.client('apigateway')
+
+ changed = True # for now it will stay that way until we can sometimes avoid change
+ conf_res = None
+ dep_res = None
+ del_res = None
+
+ if state == "present":
+ if api_id is None:
+ api_id = create_empty_api(module, client, endpoint_type)
+ api_data = get_api_definitions(module, swagger_file=swagger_file,
+ swagger_dict=swagger_dict, swagger_text=swagger_text)
+ conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
+ if state == "absent":
+ del_res = delete_rest_api(module, client, api_id)
+
+ exit_args = {"changed": changed, "api_id": api_id}
+
+ if conf_res is not None:
+ exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
+ if dep_res is not None:
+ exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
+ if del_res is not None:
+ exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
+
+ module.exit_json(**exit_args)
+
+
+def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
+ apidata = None
+ if swagger_file is not None:
+ try:
+ with open(swagger_file) as f:
+ apidata = f.read()
+ except OSError as e:
+ msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e))
+ module.fail_json(msg=msg, exception=traceback.format_exc())
+ if swagger_dict is not None:
+ apidata = json.dumps(swagger_dict)
+ if swagger_text is not None:
+ apidata = swagger_text
+
+ if apidata is None:
+ module.fail_json(msg='module error - no swagger info provided')
+ return apidata
+
+
+def create_empty_api(module, client, endpoint_type):
+ """
+ creates a new empty API ready to be configured. The description is
+ temporarily set to show the API as incomplete but should be
+ updated when the API is configured.
+ """
+ desc = "Incomplete API creation by ansible api_gateway module"
+ try:
+ awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="creating API")
+ return awsret["id"]
+
+
+def delete_rest_api(module, client, api_id):
+ """
+ Deletes entire REST API setup
+ """
+ try:
+ delete_response = delete_api(client, api_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
+ return delete_response
+
+
+def ensure_api_in_correct_state(module, client, api_id, api_data):
+ """Make sure that we have the API configured and deployed as instructed.
+
+    This function first configures the API by uploading the
+    swagger definitions and then deploys them. Configuration and
+    deployment should be closely tied because there is only one set of
+    definitions, so if we stop in between they may be updated by someone
+    else and we would then deploy the wrong configuration.
+ """
+
+ configure_response = None
+ try:
+ configure_response = configure_api(client, api_id, api_data=api_data)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="configuring API {0}".format(api_id))
+
+ deploy_response = None
+
+ stage = module.params.get('stage')
+ if stage:
+ try:
+ deploy_response = create_deployment(client, api_id, **module.params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+ msg = "deploying api {0} to stage {1}".format(api_id, stage)
+ module.fail_json_aws(e, msg)
+
+ return configure_response, deploy_response
+
+
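+# API Gateway throttles aggressively, so in addition to the default
+# retryable errors the thin client wrappers below also retry on
+# TooManyRequestsException.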
+retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']}
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def create_api(client, name=None, description=None, endpoint_type=None):
+    return client.create_rest_api(name=name, description=description, endpointConfiguration={'types': [endpoint_type]})
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def delete_api(client, api_id):
+ return client.delete_rest_api(restApiId=api_id)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def configure_api(client, api_id, api_data=None, mode="overwrite"):
+ return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def create_deployment(client, rest_api_id, **params):
+    # Assemble the shared arguments once; canarySettings is the only
+    # conditional key, added when stage_canary_settings is non-empty.
+    kwargs = dict(
+        restApiId=rest_api_id,
+        stageName=params.get('stage'),
+        description=params.get('deploy_desc'),
+        cacheClusterEnabled=params.get('cache_enabled'),
+        cacheClusterSize=params.get('cache_size'),
+        variables=params.get('stage_variables'),
+        tracingEnabled=params.get('tracing_enabled'),
+    )
+
+    canary_settings = params.get('stage_canary_settings')
+    if canary_settings:
+        kwargs['canarySettings'] = canary_settings
+
+    return client.create_deployment(**kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py
new file mode 100644
index 000000000..9b4ec8572
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: api_gateway_domain
+short_description: Manage AWS API Gateway custom domains
+description:
+ - Manages API Gateway custom domains for API GW Rest APIs.
+ - AWS API Gateway custom domain setups use CloudFront behind the scenes.
+ So you will get a CloudFront distribution as a result, configured to be aliased with your domain.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway_domain).
+ The usage did not change.
+version_added: '3.3.0'
+author:
+ - 'Stefan Horning (@stefanhorning)'
+options:
+ domain_name:
+ description:
+ - Domain name you want to use for your API GW deployment.
+ required: true
+ type: str
+ certificate_arn:
+ description:
+ - AWS Certificate Manger (ACM) TLS certificate ARN.
+ type: str
+ required: true
+ security_policy:
+ description:
+ - Set allowed TLS versions through AWS defined policies. Currently only C(TLS_1_0) and C(TLS_1_2) are available.
+ default: TLS_1_2
+ choices: ['TLS_1_0', 'TLS_1_2']
+ type: str
+ endpoint_type:
+ description:
+      - API endpoint configuration for the domain. Use C(EDGE) for an edge-optimized endpoint, or use C(REGIONAL) or C(PRIVATE).
+ default: EDGE
+ choices: ['EDGE', 'REGIONAL', 'PRIVATE']
+ type: str
+ domain_mappings:
+ description:
+      - Map your domain base paths to the API GW REST APIs that you previously created. Use the provided ID of the API setup and the release stage.
+      - "I(domain_mappings) should be a list of dictionaries containing three keys: C(base_path), C(rest_api_id) and C(stage)."
+      - "Example: I([{ base_path: v1, rest_api_id: abc123, stage: production }])"
+      - If you want the base path to be just I(/), omit the parameter completely or set it to an empty string.
+ required: true
+ type: list
+ elements: dict
+ state:
+ description:
+ - Create or delete custom domain setup.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+notes:
+  - Does not create a DNS entry on Route53; for that use the M(community.aws.route53) module.
+  - Only supports TLS certificates from AWS ACM, which can simply be referenced by their ARN, while the AWS API still offers (deprecated)
+    options to add your own certificates.
+'''
+
+EXAMPLES = '''
+- name: Setup endpoint for a custom domain for your API Gateway HTTP API
+ community.aws.api_gateway_domain:
+ domain_name: myapi.foobar.com
+ certificate_arn: 'arn:aws:acm:us-east-1:1231123123:certificate/8bd89412-abc123-xxxxx'
+ security_policy: TLS_1_2
+ endpoint_type: EDGE
+ domain_mappings:
+ - { rest_api_id: abc123, stage: production }
+ state: present
+ register: api_gw_domain_result
+
+- name: Create a DNS record for your custom domain on route 53 (using route53 module)
+ community.aws.route53:
+ record: myapi.foobar.com
+ value: "{{ api_gw_domain_result.response.domain.distribution_domain_name }}"
+ type: A
+ alias: true
+ zone: foobar.com
+ alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}"
+ command: create
+'''
+
+RETURN = '''
+response:
+  description: The data returned by boto3's create_domain_name (or update and delete) and create_base_path_mapping methods.
+ returned: success
+ type: dict
+ sample:
+ domain:
+ {
+ domain_name: mydomain.com,
+ certificate_arn: 'arn:aws:acm:xxxxxx',
+ distribution_domain_name: xxxx.cloudfront.net,
+ distribution_hosted_zone_id: ABC123123,
+ endpoint_configuration: { types: ['EDGE'] },
+ domain_name_status: 'AVAILABLE',
+ security_policy: TLS_1_2,
+ tags: {}
+ }
+ path_mappings: [
+ { base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' }
+ ]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, EndpointConnectionError
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+import copy
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+
+
+def get_domain(module, client):
+ domain_name = module.params.get('domain_name')
+ result = {}
+ try:
+ result['domain'] = get_domain_name(client, domain_name)
+ result['path_mappings'] = get_domain_mappings(client, domain_name)
+ except is_boto3_error_code('NotFoundException'):
+ return None
+ except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="getting API GW domain")
+ return camel_dict_to_snake_dict(result)
+
+
+def create_domain(module, client):
+ path_mappings = module.params.get('domain_mappings', [])
+ domain_name = module.params.get('domain_name')
+ result = {'domain': {}, 'path_mappings': []}
+
+ try:
+ result['domain'] = create_domain_name(
+ module,
+ client,
+ domain_name,
+ module.params.get('certificate_arn'),
+ module.params.get('endpoint_type'),
+ module.params.get('security_policy')
+ )
+
+ for mapping in path_mappings:
+ base_path = mapping.get('base_path', '')
+ rest_api_id = mapping.get('rest_api_id')
+ stage = mapping.get('stage')
+ if rest_api_id is None or stage is None:
+                module.fail_json(msg='Every domain mapping needs a rest_api_id and stage name')
+
+ result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage))
+
+ except (ClientError, BotoCoreError, EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="creating API GW domain")
+ return camel_dict_to_snake_dict(result)
+
+
+def update_domain(module, client, existing_domain):
+ domain_name = module.params.get('domain_name')
+ result = existing_domain
+ result['updated'] = False
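+    # The result is reported as updated only when either the domain settings
+    # or the base path mappings actually change below.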
+
+ domain = existing_domain.get('domain')
+    # Compare only the relevant set of domain arguments,
+    # as get_domain_name gathers all kinds of state information that can't be set anyway.
+    # Also, this module doesn't support custom TLS cert setup params, as they are kind of deprecated already and would increase complexity.
+ existing_domain_settings = {
+ 'certificate_arn': domain.get('certificate_arn'),
+ 'security_policy': domain.get('security_policy'),
+ 'endpoint_type': domain.get('endpoint_configuration').get('types')[0]
+ }
+ specified_domain_settings = {
+ 'certificate_arn': module.params.get('certificate_arn'),
+ 'security_policy': module.params.get('security_policy'),
+ 'endpoint_type': module.params.get('endpoint_type')
+ }
+
+ if specified_domain_settings != existing_domain_settings:
+ try:
+ result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings))
+ result['updated'] = True
+ except (ClientError, BotoCoreError, EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="updating API GW domain")
+
+ existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', []))
+    # Clean out `base_path: "(none)"` elements from dicts as those won't match the specified mappings
+ for mapping in existing_mappings:
+ if mapping.get('base_path', 'missing') == '(none)':
+ mapping.pop('base_path')
+
+ specified_mappings = copy.deepcopy(module.params.get('domain_mappings', []))
+    # Clean out `base_path: ""` elements from dicts as those won't match the existing mappings
+ for mapping in specified_mappings:
+ if mapping.get('base_path', 'missing') == '':
+ mapping.pop('base_path')
+
+ if specified_mappings != existing_mappings:
+ try:
+            # When the lists mismatch, delete all existing mappings before adding new ones as specified
+ for mapping in existing_domain.get('path_mappings', []):
+ delete_domain_mapping(client, domain_name, mapping['base_path'])
+ for mapping in module.params.get('domain_mappings', []):
+ result['path_mappings'] = add_domain_mapping(
+ client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage')
+ )
+ result['updated'] = True
+ except (ClientError, BotoCoreError, EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="updating API GW domain mapping")
+
+ return camel_dict_to_snake_dict(result)
+
+
+def delete_domain(module, client):
+ domain_name = module.params.get('domain_name')
+ try:
+ result = delete_domain_name(client, domain_name)
+ except (ClientError, BotoCoreError, EndpointConnectionError) as e:
+ module.fail_json_aws(e, msg="deleting API GW domain")
+ return camel_dict_to_snake_dict(result)
+
+
+retry_params = {"delay": 5, "backoff": 1.2}
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def get_domain_name(client, domain_name):
+ return client.get_domain_name(domainName=domain_name)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def get_domain_mappings(client, domain_name):
+ return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', [])
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy):
+ endpoint_configuration = {'types': [endpoint_type]}
+
+ if endpoint_type == 'EDGE':
+ return client.create_domain_name(
+ domainName=domain_name,
+ certificateArn=certificate_arn,
+ endpointConfiguration=endpoint_configuration,
+ securityPolicy=security_policy
+ )
+ else:
+ # Use regionalCertificateArn for regional domain deploys
+ return client.create_domain_name(
+ domainName=domain_name,
+ regionalCertificateArn=certificate_arn,
+ endpointConfiguration=endpoint_configuration,
+ securityPolicy=security_policy
+ )
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage):
+ return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def update_domain_name(client, domain_name, **kwargs):
+ patch_operations = []
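+    # Build a JSON-Patch style list for update_domain_name, e.g.
+    #   [{"op": "replace", "path": "/securityPolicy", "value": "TLS_1_2"}]
+    # endpointType is skipped on purpose: changing it would have to patch
+    # /endpointConfiguration/types instead, which this module does not do.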
+
+ for key, value in kwargs.items():
+ path = "/" + key
+ if key == "endpointType":
+ continue
+ patch_operations.append({"op": "replace", "path": path, "value": value})
+
+ return client.update_domain_name(domainName=domain_name, patchOperations=patch_operations)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def delete_domain_name(client, domain_name):
+ return client.delete_domain_name(domainName=domain_name)
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def delete_domain_mapping(client, domain_name, base_path):
+ return client.delete_base_path_mapping(domainName=domain_name, basePath=base_path)
+
+
+def main():
+ argument_spec = dict(
+ domain_name=dict(type='str', required=True),
+ certificate_arn=dict(type='str', required=True),
+ security_policy=dict(type='str', default='TLS_1_2', choices=['TLS_1_0', 'TLS_1_2']),
+ endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']),
+ domain_mappings=dict(type='list', required=True, elements='dict'),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ client = module.client('apigateway')
+
+ state = module.params.get('state')
+ changed = False
+
+ if state == "present":
+ existing_domain = get_domain(module, client)
+ if existing_domain is not None:
+ result = update_domain(module, client, existing_domain)
+ changed = result['updated']
+ else:
+ result = create_domain(module, client)
+ changed = True
+ if state == "absent":
+ result = delete_domain(module, client)
+ changed = True
+
+ exit_args = {"changed": changed}
+
+ if result is not None:
+ exit_args['response'] = result
+
+ module.exit_json(**exit_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py
new file mode 100644
index 000000000..d20c107de
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: application_autoscaling_policy
+version_added: 1.0.0
+short_description: Manage Application Auto Scaling Scaling Policies
+notes:
+ - For more details of the parameters and returns see
+ U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
+description:
+ - Creates, updates or removes a Scaling Policy.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_application_scaling_policy).
+ The usage did not change.
+author:
+ - Gustavo Maia (@gurumaia)
+ - Chen Leibovich (@chenl87)
+options:
+ state:
+ description: Whether a policy should be C(present) or C(absent).
+ required: true
+ choices: ['absent', 'present']
+ type: str
+ policy_name:
+ description: The name of the scaling policy.
+ required: true
+ type: str
+ service_namespace:
+ description: The namespace of the AWS service.
+ required: true
+ choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
+ type: str
+ resource_id:
+ description: The identifier of the resource associated with the scalable target.
+ required: true
+ type: str
+ scalable_dimension:
+ description: The scalable dimension associated with the scalable target.
+ required: true
+ choices: [ 'ecs:service:DesiredCount',
+ 'ec2:spot-fleet-request:TargetCapacity',
+ 'elasticmapreduce:instancegroup:InstanceCount',
+ 'appstream:fleet:DesiredCapacity',
+ 'dynamodb:table:ReadCapacityUnits',
+ 'dynamodb:table:WriteCapacityUnits',
+ 'dynamodb:index:ReadCapacityUnits',
+ 'dynamodb:index:WriteCapacityUnits']
+ type: str
+ policy_type:
+ description: The policy type.
+ required: true
+ choices: ['StepScaling', 'TargetTrackingScaling']
+ type: str
+ step_scaling_policy_configuration:
+ description: A step scaling policy. This parameter is required if you are creating a policy and I(policy_type=StepScaling).
+ required: false
+ type: dict
+ target_tracking_scaling_policy_configuration:
+ description:
+ - A target tracking policy. This parameter is required if you are creating a new policy and I(policy_type=TargetTrackingScaling).
+ - 'Full documentation of the suboptions can be found in the API documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)'
+ required: false
+ type: dict
+ suboptions:
+ CustomizedMetricSpecification:
+ description: The metric to use if using a customized metric.
+ type: dict
+ DisableScaleIn:
+ description: Whether scaling-in should be disabled.
+ type: bool
+ PredefinedMetricSpecification:
+ description: The metric to use if using a predefined metric.
+ type: dict
+ ScaleInCooldown:
+ description: The time (in seconds) to wait after scaling-in before another scaling action can occur.
+ type: int
+ ScaleOutCooldown:
+ description: The time (in seconds) to wait after scaling-out before another scaling action can occur.
+ type: int
+ TargetValue:
+ description: The target value for the metric.
+ type: float
+ minimum_tasks:
+ description: The minimum value to scale to in response to a scale in event.
+      This parameter is required if you are creating the first policy for the specified service.
+ required: false
+ type: int
+ maximum_tasks:
+ description: The maximum value to scale to in response to a scale out event.
+      This parameter is required if you are creating the first policy for the specified service.
+ required: false
+ type: int
+ override_task_capacity:
+ description:
+ - Whether or not to override values of minimum and/or maximum tasks if it's already set.
+ - Defaults to C(false).
+ required: false
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create step scaling policy for ECS Service
+- name: scaling_policy
+ community.aws.application_autoscaling_policy:
+ state: present
+ policy_name: test_policy
+ service_namespace: ecs
+ resource_id: service/poc-pricing/test-as
+ scalable_dimension: ecs:service:DesiredCount
+ policy_type: StepScaling
+ minimum_tasks: 1
+ maximum_tasks: 6
+ step_scaling_policy_configuration:
+ AdjustmentType: ChangeInCapacity
+ StepAdjustments:
+ - MetricIntervalUpperBound: 123
+ ScalingAdjustment: 2
+ - MetricIntervalLowerBound: 123
+ ScalingAdjustment: -2
+ Cooldown: 123
+ MetricAggregationType: Average
+
+# Create target tracking scaling policy for ECS Service
+- name: scaling_policy
+ community.aws.application_autoscaling_policy:
+ state: present
+ policy_name: test_policy
+ service_namespace: ecs
+ resource_id: service/poc-pricing/test-as
+ scalable_dimension: ecs:service:DesiredCount
+ policy_type: TargetTrackingScaling
+ minimum_tasks: 1
+ maximum_tasks: 6
+ target_tracking_scaling_policy_configuration:
+ TargetValue: 60
+ PredefinedMetricSpecification:
+ PredefinedMetricType: ECSServiceAverageCPUUtilization
+ ScaleOutCooldown: 60
+ ScaleInCooldown: 60
+
+# Remove scalable target for ECS Service
+- name: scaling_policy
+ community.aws.application_autoscaling_policy:
+ state: absent
+ policy_name: test_policy
+ policy_type: StepScaling
+ service_namespace: ecs
+ resource_id: service/cluster-name/service-name
+ scalable_dimension: ecs:service:DesiredCount
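+
+# Create target tracking scaling policy for DynamoDB table read capacity
+# (the table name and capacity values below are illustrative)
+- name: scaling_policy
+  community.aws.application_autoscaling_policy:
+    state: present
+    policy_name: table-read-policy
+    service_namespace: dynamodb
+    resource_id: table/my-table
+    scalable_dimension: dynamodb:table:ReadCapacityUnits
+    policy_type: TargetTrackingScaling
+    minimum_tasks: 5
+    maximum_tasks: 100
+    target_tracking_scaling_policy_configuration:
+      TargetValue: 70
+      PredefinedMetricSpecification:
+        PredefinedMetricType: DynamoDBReadCapacityUtilization
+      ScaleOutCooldown: 60
+      ScaleInCooldown: 60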
+'''
+
+RETURN = '''
+alarms:
+ description: List of the CloudWatch alarms associated with the scaling policy
+ returned: when state present
+ type: complex
+ contains:
+ alarm_arn:
+ description: The Amazon Resource Name (ARN) of the alarm
+ returned: when state present
+ type: str
+ alarm_name:
+ description: The name of the alarm
+ returned: when state present
+ type: str
+service_namespace:
+ description: The namespace of the AWS service.
+ returned: when state present
+ type: str
+ sample: ecs
+resource_id:
+ description: The identifier of the resource associated with the scalable target.
+ returned: when state present
+ type: str
+ sample: service/cluster-name/service-name
+scalable_dimension:
+ description: The scalable dimension associated with the scalable target.
+ returned: when state present
+ type: str
+ sample: ecs:service:DesiredCount
+policy_arn:
+  description: The Amazon Resource Name (ARN) of the scaling policy.
+ returned: when state present
+ type: str
+policy_name:
+ description: The name of the scaling policy.
+ returned: when state present
+ type: str
+policy_type:
+ description: The policy type.
+ returned: when state present
+ type: str
+min_capacity:
+ description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
+ returned: when state present
+ type: int
+ sample: 1
+max_capacity:
+ description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present).
+ returned: when state present
+ type: int
+ sample: 2
+role_arn:
+ description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present).
+ returned: when state present
+ type: str
+ sample: arn:aws:iam::123456789012:role/roleName
+step_scaling_policy_configuration:
+ description: The step scaling policy.
+ returned: when state present and the policy type is StepScaling
+ type: complex
+ contains:
+ adjustment_type:
+ description: The adjustment type
+ returned: when state present and the policy type is StepScaling
+ type: str
+ sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity"
+ cooldown:
+ description: The amount of time, in seconds, after a scaling activity completes
+ where previous trigger-related scaling activities can influence future scaling events
+ returned: when state present and the policy type is StepScaling
+ type: int
+ sample: 60
+ metric_aggregation_type:
+ description: The aggregation type for the CloudWatch metrics
+ returned: when state present and the policy type is StepScaling
+ type: str
+ sample: "Average, Minimum, Maximum"
+ step_adjustments:
+ description: A set of adjustments that enable you to scale based on the size of the alarm breach
+ returned: when state present and the policy type is StepScaling
+ type: list
+ elements: dict
+target_tracking_scaling_policy_configuration:
+ description: The target tracking policy.
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: complex
+ contains:
+ predefined_metric_specification:
+ description: A predefined metric
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: complex
+ contains:
+ predefined_metric_type:
+ description: The metric type
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: str
+ sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization"
+ resource_label:
+ description: Identifies the resource associated with the metric type
+ returned: when metric type is ALBRequestCountPerTarget
+ type: str
+ scale_in_cooldown:
+ description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 60
+ scale_out_cooldown:
+ description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 60
+ target_value:
+ description: The target value for the metric
+ returned: when state present and the policy type is TargetTrackingScaling
+ type: int
+ sample: 70
+creation_time:
+ description: The Unix timestamp for when the scalable target was created.
+ returned: when state present
+ type: str
+ sample: '2017-09-28T08:22:51.881000-03:00'
+''' # NOQA
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+# Merge the results of the scalable target creation and policy deletion/creation
+# There's no risk in overriding values since mutual keys have the same values in our case
+def merge_results(scalable_target_result, policy_result):
+    changed = scalable_target_result['changed'] or policy_result['changed']
+
+ merged_response = scalable_target_result['response'].copy()
+ merged_response.update(policy_result['response'])
+
+ return {"changed": changed, "response": merged_response}
+
+
+def delete_scaling_policy(connection, module):
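+    # Describe first so that changed=True is only reported when a matching
+    # policy actually existed and was deleted.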
+ changed = False
+ try:
+ scaling_policy = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+ if scaling_policy['ScalingPolicies']:
+ try:
+ connection.delete_scaling_policy(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyName=module.params.get('policy_name'),
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete scaling policy")
+
+ return {"changed": changed}
+
+
+def create_scalable_target(connection, module):
+ changed = False
+
+ try:
+ scalable_targets = connection.describe_scalable_targets(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceIds=[
+ module.params.get('resource_id'),
+ ],
+ ScalableDimension=module.params.get('scalable_dimension')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scalable targets")
+
+ # Scalable target registration will occur if:
+ # 1. There is no scalable target registered for this service
+ # 2. A scalable target exists, different min/max values are defined and override is set to "yes"
+ if (
+ not scalable_targets['ScalableTargets']
+ or (
+ module.params.get('override_task_capacity')
+ and (
+ scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks')
+ or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks')
+ )
+ )
+ ):
+ changed = True
+ try:
+ connection.register_scalable_target(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ MinCapacity=module.params.get('minimum_tasks'),
+ MaxCapacity=module.params.get('maximum_tasks')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to register scalable target")
+
+ try:
+ response = connection.describe_scalable_targets(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceIds=[
+ module.params.get('resource_id'),
+ ],
+ ScalableDimension=module.params.get('scalable_dimension')
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scalable targets")
+
+    if response['ScalableTargets']:
+ snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
+ else:
+ snaked_response = {}
+
+ return {"changed": changed, "response": snaked_response}
+
+
+def create_scaling_policy(connection, module):
+ try:
+ scaling_policy = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+ changed = False
+
+ if scaling_policy['ScalingPolicies']:
+ scaling_policy = scaling_policy['ScalingPolicies'][0]
+ # check if the input parameters are equal to what's already configured
+ for attr in ('PolicyName',
+ 'ServiceNamespace',
+ 'ResourceId',
+ 'ScalableDimension',
+ 'PolicyType',
+ 'StepScalingPolicyConfiguration',
+ 'TargetTrackingScalingPolicyConfiguration'):
+ if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
+ changed = True
+ scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
+ else:
+ changed = True
+ scaling_policy = {
+ 'PolicyName': module.params.get('policy_name'),
+ 'ServiceNamespace': module.params.get('service_namespace'),
+ 'ResourceId': module.params.get('resource_id'),
+ 'ScalableDimension': module.params.get('scalable_dimension'),
+ 'PolicyType': module.params.get('policy_type'),
+ 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
+ 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
+ }
+
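+    # put_scaling_policy rejects parameters set to None, so call it with only
+    # the configuration block that was actually supplied for this policy type.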
+ if changed:
+ try:
+            if module.params.get('step_scaling_policy_configuration'):
+ connection.put_scaling_policy(
+ PolicyName=scaling_policy['PolicyName'],
+ ServiceNamespace=scaling_policy['ServiceNamespace'],
+ ResourceId=scaling_policy['ResourceId'],
+ ScalableDimension=scaling_policy['ScalableDimension'],
+ PolicyType=scaling_policy['PolicyType'],
+ StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
+ )
+            elif module.params.get('target_tracking_scaling_policy_configuration'):
+ connection.put_scaling_policy(
+ PolicyName=scaling_policy['PolicyName'],
+ ServiceNamespace=scaling_policy['ServiceNamespace'],
+ ResourceId=scaling_policy['ResourceId'],
+ ScalableDimension=scaling_policy['ScalableDimension'],
+ PolicyType=scaling_policy['PolicyType'],
+ TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create scaling policy")
+
+ try:
+ response = connection.describe_scaling_policies(
+ ServiceNamespace=module.params.get('service_namespace'),
+ ResourceId=module.params.get('resource_id'),
+ ScalableDimension=module.params.get('scalable_dimension'),
+ PolicyNames=[module.params.get('policy_name')],
+ MaxResults=1
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe scaling policies")
+
+    if response['ScalingPolicies']:
+ snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
+ else:
+ snaked_response = {}
+
+ return {"changed": changed, "response": snaked_response}
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ policy_name=dict(type='str', required=True),
+ service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
+ resource_id=dict(type='str', required=True),
+ scalable_dimension=dict(type='str',
+ required=True,
+ choices=['ecs:service:DesiredCount',
+ 'ec2:spot-fleet-request:TargetCapacity',
+ 'elasticmapreduce:instancegroup:InstanceCount',
+ 'appstream:fleet:DesiredCapacity',
+ 'dynamodb:table:ReadCapacityUnits',
+ 'dynamodb:table:WriteCapacityUnits',
+ 'dynamodb:index:ReadCapacityUnits',
+ 'dynamodb:index:WriteCapacityUnits']),
+ policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
+ step_scaling_policy_configuration=dict(type='dict'),
+ target_tracking_scaling_policy_configuration=dict(
+ type='dict',
+ options=dict(
+ CustomizedMetricSpecification=dict(type='dict'),
+ DisableScaleIn=dict(type='bool'),
+ PredefinedMetricSpecification=dict(type='dict'),
+ ScaleInCooldown=dict(type='int'),
+ ScaleOutCooldown=dict(type='int'),
+ TargetValue=dict(type='float'),
+ )
+ ),
+ minimum_tasks=dict(type='int'),
+ maximum_tasks=dict(type='int'),
+ override_task_capacity=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('application-autoscaling')
+
+ # Remove any target_tracking_scaling_policy_configuration suboptions that are None
+ policy_config_options = [
+ 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue'
+ ]
+ if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict):
+ for option in policy_config_options:
+ if module.params['target_tracking_scaling_policy_configuration'][option] is None:
+ module.params['target_tracking_scaling_policy_configuration'].pop(option)
+
+ if module.params.get("state") == 'present':
+ # A scalable target must be registered prior to creating a scaling policy
+ scalable_target_result = create_scalable_target(connection, module)
+ policy_result = create_scaling_policy(connection, module)
+ # Merge the results of the scalable target creation and policy deletion/creation
+ # There's no risk in overriding values since mutual keys have the same values in our case
+ merged_result = merge_results(scalable_target_result, policy_result)
+ module.exit_json(**merged_result)
+ else:
+ policy_result = delete_scaling_policy(connection, module)
+ module.exit_json(**policy_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py
new file mode 100644
index 000000000..8f585a102
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: autoscaling_complete_lifecycle_action
+short_description: Completes the lifecycle action of an instance
+description:
+ - Used to complete the lifecycle action for the specified instance with the specified result.
+version_added: "4.1.0"
+author:
+ - Saleh Abbas (@salehabbas) <saleh.abbas@thetradedesk.com>
+options:
+ asg_name:
+ description:
+ - The name of the Auto Scaling Group which the instance belongs to.
+ type: str
+ required: true
+ lifecycle_hook_name:
+ description:
+ - The name of the lifecycle hook to complete.
+ type: str
+ required: true
+ lifecycle_action_result:
+ description:
+ - The action for the lifecycle hook to take.
+ choices: ['CONTINUE', 'ABANDON']
+ type: str
+ required: true
+ instance_id:
+ description:
+ - The ID of the instance.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Complete the lifecycle action
+- community.aws.autoscaling_complete_lifecycle_action:
+ asg_name: my-auto-scaling-group
+ lifecycle_hook_name: my-lifecycle-hook
+ lifecycle_action_result: CONTINUE
+ instance_id: i-123knm1l2312
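+
+# Abandon the lifecycle action instead of continuing (values are illustrative)
+- community.aws.autoscaling_complete_lifecycle_action:
+    asg_name: my-auto-scaling-group
+    lifecycle_hook_name: my-lifecycle-hook
+    lifecycle_action_result: ABANDON
+    instance_id: i-123knm1l2312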
+'''
+
+RETURN = '''
+---
+results:
+  description: The raw response from the C(CompleteLifecycleAction) API call.
+  returned: success
+  type: dict
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ asg_name=dict(required=True, type='str'),
+ lifecycle_hook_name=dict(required=True, type='str'),
+ lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']),
+ instance_id=dict(required=True, type='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ asg_name = module.params.get('asg_name')
+ lifecycle_hook_name = module.params.get('lifecycle_hook_name')
+ lifecycle_action_result = module.params.get('lifecycle_action_result')
+ instance_id = module.params.get('instance_id')
+
+ autoscaling = module.client('autoscaling')
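+    # complete_lifecycle_action must be called before the hook's heartbeat
+    # timeout expires; otherwise the hook's configured default result is applied.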
+ try:
+ results = autoscaling.complete_lifecycle_action(
+ LifecycleHookName=lifecycle_hook_name,
+ AutoScalingGroupName=asg_name,
+ LifecycleActionResult=lifecycle_action_result,
+ InstanceId=instance_id
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to completes the lifecycle action")
+
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py
new file mode 100644
index 000000000..94c2bb38c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: autoscaling_instance_refresh
+version_added: 3.2.0
+short_description: Start or cancel an EC2 Auto Scaling Group (ASG) instance refresh in AWS
+description:
+ - Start or cancel an EC2 Auto Scaling Group instance refresh in AWS.
+ - Can be used with M(community.aws.autoscaling_instance_refresh_info) to track the subsequent progress.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh).
+ The usage did not change.
+author:
+ - "Dan Khersonsky (@danquixote)"
+options:
+ state:
+ description:
+ - Desired state of the ASG.
+ type: str
+ required: true
+ choices: [ 'started', 'cancelled' ]
+ name:
+ description:
+ - The name of the auto scaling group you are searching for.
+ type: str
+ required: true
+ strategy:
+ description:
+ - The strategy to use for the instance refresh. The only valid value is C(Rolling).
+ - A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated.
+ - A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in.
+ - If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration.
+ type: str
+ default: 'Rolling'
+ preferences:
+ description:
+ - Set of preferences associated with the instance refresh request.
+ - If not provided, the default values are used.
+ - For I(min_healthy_percentage), the default value is C(90).
+ - For I(instance_warmup), the default is to use the value specified for the health check grace period for the Auto Scaling group.
+      - Cannot be specified when I(state) is set to C(cancelled).
+ required: false
+ suboptions:
+ min_healthy_percentage:
+ description:
+ - Total percent of capacity in ASG that must remain healthy during instance refresh to allow operation to continue.
+ - It is rounded up to the nearest integer.
+ type: int
+ default: 90
+ instance_warmup:
+ description:
+ - The number of seconds until a newly launched instance is configured and ready to use.
+ - During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement.
+ - The default is to use the value for the health check grace period defined for the group.
+ type: int
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Start a refresh
+ community.aws.autoscaling_instance_refresh:
+ name: some-asg
+ state: started
+
+- name: Cancel a refresh
+ community.aws.autoscaling_instance_refresh:
+ name: some-asg
+ state: cancelled
+
+- name: Start a refresh and pass preferences
+ community.aws.autoscaling_instance_refresh:
+ name: some-asg
+ state: started
+ preferences:
+ min_healthy_percentage: 91
+ instance_warmup: 60
+
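+# Track progress with the companion _info module (the registered variable
+# layout below matches this module's return values)
+- name: Start a refresh and register the result
+  community.aws.autoscaling_instance_refresh:
+    name: some-asg
+    state: started
+  register: refresh_result
+
+- name: Check on the refresh started above
+  community.aws.autoscaling_instance_refresh_info:
+    name: some-asg
+    ids: ['{{ refresh_result.instance_refreshes.instance_refresh_id }}']
+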
+'''
+
+RETURN = '''
+---
+instance_refresh_id:
+  description: The instance refresh ID.
+ returned: success
+ type: str
+ sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b"
+auto_scaling_group_name:
+  description: The name of the Auto Scaling group.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+status:
+ description:
+    - The current status of the instance refresh.
+    - The following are the possible statuses
+    - C(Pending) - The request was created, but the operation has not started.
+    - C(InProgress) - The operation is in progress.
+    - C(Successful) - The operation completed successfully.
+    - C(Failed) - The operation failed to complete.
+      You can troubleshoot using the status reason and the scaling activities.
+    - C(Cancelling) - An ongoing operation is being cancelled.
+      Cancellation does not roll back any replacements that have already been
+      completed, but it prevents new replacements from being started.
+    - C(Cancelled) - The operation is cancelled.
+ returned: success
+ type: str
+ sample: "Pending"
+start_time:
+  description: The date and time at which the instance refresh began, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+end_time:
+  description: The date and time at which the instance refresh ended, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+percentage_complete:
+  description: The percentage of the instance refresh that is complete.
+ returned: success
+ type: int
+ sample: 100
+instances_to_update:
+  description: The number of instances remaining to update.
+ returned: success
+ type: int
+ sample: 5
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+
+def start_or_cancel_instance_refresh(conn, module):
+ """
+ Args:
+ conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+ module: AnsibleAWSModule object
+
+ Returns:
+ {
+ "instance_refreshes": [
+ {
+ 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg',
+ 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09',
+ 'instances_to_update': 1,
+ 'percentage_complete': 0,
+ "preferences": {
+ "instance_warmup": 60,
+ "min_healthy_percentage": 90,
+ "skip_matching": false
+ },
+ 'start_time': '2021-02-04T03:39:40+00:00',
+ 'status': 'Cancelling',
+ 'status_reason': 'Replacing instances before cancelling.',
+ }
+ ]
+ }
+ """
+
+ asg_state = module.params.get('state')
+ asg_name = module.params.get('name')
+ preferences = module.params.get('preferences')
+
+ args = {}
+ args['AutoScalingGroupName'] = asg_name
+ if asg_state == 'started':
+ args['Strategy'] = module.params.get('strategy')
+ if preferences:
+ if asg_state == 'cancelled':
+ module.fail_json(msg='can not pass preferences dict when canceling a refresh')
+ _prefs = scrub_none_parameters(preferences)
+ args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True)
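+    # Dispatch table mapping the requested state to the boto3 call. Both calls
+    # accept AutoScalingGroupName; start_instance_refresh additionally accepts
+    # the Strategy and Preferences built above.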
+ cmd_invocations = {
+ 'cancelled': conn.cancel_instance_refresh,
+ 'started': conn.start_instance_refresh,
+ }
+ try:
+        if module.check_mode:
+            ongoing_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', [])
+            if asg_state == 'started':
+                if ongoing_refreshes:
+                    module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.')
+                else:
+                    module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.')
+            elif asg_state == 'cancelled':
+                if not ongoing_refreshes:
+                    module.exit_json(changed=False, msg='In check_mode - No active refresh found, nothing to cancel.')
+                elif ongoing_refreshes[0].get('Status', '') in ['Cancelling', 'Cancelled']:
+                    module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.')
+                else:
+                    module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.')
+ result = cmd_invocations[asg_state](aws_retry=True, **args)
+ instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']])
+ result = dict(
+ instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0])
+ )
+ return module.exit_json(**result)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg='Failed to {0} InstanceRefresh'.format(
+                'start' if asg_state == 'started' else 'cancel'
+ )
+ )
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['started', 'cancelled'],
+ ),
+ name=dict(required=True),
+ strategy=dict(
+ type='str',
+ default='Rolling',
+ required=False
+ ),
+ preferences=dict(
+ type='dict',
+ required=False,
+ options=dict(
+ min_healthy_percentage=dict(type='int', default=90),
+ instance_warmup=dict(type='int'),
+ )
+ ),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ autoscaling = module.client(
+ 'autoscaling',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=10,
+ catch_extra_error_codes=['InstanceRefreshInProgress']
+ )
+ )
+
+ start_or_cancel_instance_refresh(autoscaling, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py
new file mode 100644
index 000000000..3037d0b52
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: autoscaling_instance_refresh_info
+version_added: 3.2.0
+short_description: Gather information about EC2 Auto Scaling Group (ASG) Instance Refreshes in AWS
+description:
+ - Describes one or more instance refreshes.
+ - You can determine the status of a request by looking at the I(status) parameter.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info).
+ The usage did not change.
+author: "Dan Khersonsky (@danquixote)"
+options:
+ name:
+ description:
+ - The name of the Auto Scaling group.
+ type: str
+ required: true
+ ids:
+ description:
+ - One or more instance refresh IDs.
+ type: list
+ elements: str
+ default: []
+ next_token:
+ description:
+ - The token for the next set of items to return. (You received this token from a previous call.)
+ type: str
+ max_records:
+ description:
+ - The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.
+ type: int
+ required: false
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find a refresh by ASG name
+ community.aws.autoscaling_instance_refresh_info:
+ name: somename-asg
+
+- name: Find a refresh by ASG name and one or more refresh-IDs
+ community.aws.autoscaling_instance_refresh_info:
+ name: somename-asg
+ ids: ['some-id-123']
+ register: asgs
+
+- name: Find a refresh by ASG name and set max_records
+ community.aws.autoscaling_instance_refresh_info:
+ name: somename-asg
+ max_records: 4
+ register: asgs
+
+- name: Find a refresh by ASG name and NextToken, if received from a previous call
+ community.aws.autoscaling_instance_refresh_info:
+ name: somename-asg
+ next_token: 'some-token-123'
+ register: asgs
+'''
+
+RETURN = '''
+---
+instance_refresh_id:
+  description: The instance refresh ID.
+ returned: success
+ type: str
+ sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b"
+auto_scaling_group_name:
+  description: The name of the Auto Scaling group.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+status:
+ description:
+    - The current status of the instance refresh.
+ - The following are the possible statuses
+ - C(Pending) - The request was created, but the operation has not started.
+ - C(InProgress) - The operation is in progress.
+ - C(Successful) - The operation completed successfully.
+ - C(Failed) - The operation failed to complete.
+ You can troubleshoot using the status reason and the scaling activities.
+ - C(Cancelling) - An ongoing operation is being cancelled.
+ Cancellation does not roll back any replacements that have already been
+ completed, but it prevents new replacements from being started.
+    - C(Cancelled) - The operation is cancelled.
+ returned: success
+ type: str
+ sample: "Pending"
+start_time:
+  description: The date and time at which the instance refresh began, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+end_time:
+  description: The date and time at which the instance refresh ended, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+percentage_complete:
+  description: The percentage of the instance refresh that is complete.
+ returned: success
+ type: int
+ sample: 100
+instances_to_update:
+  description: The number of instances remaining to update.
+ returned: success
+ type: int
+ sample: 5
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def find_asg_instance_refreshes(conn, module):
+ """
+ Args:
+ conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+ module: AnsibleAWSModule object
+
+ Returns:
+ {
+ "instance_refreshes": [
+ {
+ 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg',
+ 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09',
+ 'instances_to_update': 1,
+ 'percentage_complete': 0,
+ "preferences": {
+ "instance_warmup": 60,
+ "min_healthy_percentage": 90,
+ "skip_matching": false
+ },
+ 'start_time': '2021-02-04T03:39:40+00:00',
+ 'status': 'Cancelled',
+ 'status_reason': 'Cancelled due to user request.',
+ }
+ ],
+ 'next_token': 'string'
+ }
+ """
+
+ asg_name = module.params.get('name')
+ asg_ids = module.params.get('ids')
+ asg_next_token = module.params.get('next_token')
+ asg_max_records = module.params.get('max_records')
+
+ args = {}
+ args['AutoScalingGroupName'] = asg_name
+ if asg_ids:
+ args['InstanceRefreshIds'] = asg_ids
+ if asg_next_token:
+ args['NextToken'] = asg_next_token
+ if asg_max_records:
+ args['MaxRecords'] = asg_max_records
+
+ try:
+ instance_refreshes_result = {}
+ response = conn.describe_instance_refreshes(**args)
+ if 'InstanceRefreshes' in response:
+ instance_refreshes_dict = dict(
+                instance_refreshes=response['InstanceRefreshes'], next_token=response.get('NextToken', ''))
+ instance_refreshes_result = camel_dict_to_snake_dict(
+ instance_refreshes_dict)
+
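+        # Page through any remaining results; each call returns at most
+        # MaxRecords entries together with a NextToken.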
+        while 'NextToken' in response:
+            args['NextToken'] = response['NextToken']
+            response = conn.describe_instance_refreshes(**args)
+            if 'InstanceRefreshes' in response:
+                page = camel_dict_to_snake_dict(dict(
+                    instance_refreshes=response['InstanceRefreshes'], next_token=response.get('NextToken', '')))
+                instance_refreshes_result.setdefault('instance_refreshes', []).extend(page['instance_refreshes'])
+                instance_refreshes_result['next_token'] = page['next_token']
+
+ return module.exit_json(**instance_refreshes_result)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes')
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ ids=dict(required=False, default=[], elements='str', type='list'),
+ next_token=dict(required=False, default=None, type='str', no_log=True),
+ max_records=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ autoscaling = module.client(
+ 'autoscaling',
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ find_asg_instance_refreshes(autoscaling, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py
new file mode 100644
index 000000000..1b13d1027
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py
@@ -0,0 +1,694 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: autoscaling_launch_config
+version_added: 1.0.0
+
+short_description: Create or delete AWS Autoscaling Launch Configurations
+
+description:
+ - Can create or delete AWS Autoscaling Configurations.
+ - Works with the M(community.aws.autoscaling_group) module to manage Autoscaling Groups.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc).
+ The usage did not change.
+
+notes:
+  - Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the module parameters after the configuration has been
+    created will not modify the launch configuration on AWS. You must create a new config and assign it to the ASG instead.
+
+author:
+ - "Gareth Rushgrove (@garethr)"
+ - "Willem van Ketwich (@wilvk)"
+
+options:
+ state:
+ description:
+ - Register or deregister the instance.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ name:
+ description:
+ - Unique name for configuration.
+ required: true
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance.
+ - Required when creating a new Launch Configuration.
+ type: str
+ image_id:
+ description:
+ - The AMI unique identifier to be used for the group.
+ type: str
+ key_name:
+ description:
+ - The SSH key name to be used for access to managed instances.
+ type: str
+ security_groups:
+ description:
+ - A list of security groups to apply to the instances.
+ - You can specify either security group names or IDs or a mix.
+ type: list
+ elements: str
+ default: []
+ volumes:
+ description:
+      - A list of dictionaries defining the volumes to create.
+ - For any volume, a volume size less than C(1) will be interpreted as a request not to create the volume.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ description:
+          - The name for the volume (for example C(/dev/sda)).
+ required: true
+ no_device:
+ type: bool
+ description:
+ - When I(no_device=true) the device will not be created.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ ephemeral:
+ type: str
+ description:
+          - The name of the ephemeral (instance store) volume to map (for example C(ephemeral0)).
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ - Required unless one of I(ephemeral), I(snapshot) or I(no_device) is set.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance
+ is terminated.
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ throughput:
+ type: int
+ description:
+ - The throughput to provision for a gp3 volume.
+ - Valid Range is a minimum value of 125 and a maximum value of 1000.
+ version_added: 3.1.0
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the ec2 instance. Mutually exclusive with I(user_data_path).
+ type: str
+ user_data_path:
+ description:
+ - Path to the file that contains userdata for the ec2 instances. Mutually exclusive with I(user_data).
+ type: path
+ kernel_id:
+ description:
+ - Kernel id for the EC2 instance.
+ type: str
+ spot_price:
+ description:
+ - The spot price you are bidding. Only applies for an autoscaling group with spot instances.
+ type: float
+ instance_monitoring:
+ description:
+ - Specifies whether instances are launched with detailed monitoring.
+ type: bool
+ default: false
+ assign_public_ip:
+ description:
+ - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address
+ to each instance launched in a Amazon VPC.
+ type: bool
+ ramdisk_id:
+ description:
+ - A RAM disk id for the instances.
+ type: str
+ instance_profile_name:
+ description:
+ - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
+ type: str
+ ebs_optimized:
+ description:
+ - Specifies whether the instance is optimized for EBS I/O (true) or not (false).
+ default: false
+ type: bool
+ classic_link_vpc_id:
+ description:
+      - The ID of a ClassicLink-enabled VPC.
+ type: str
+ classic_link_vpc_security_groups:
+ description:
+ - A list of security group IDs with which to associate the ClassicLink VPC instances.
+ type: list
+ elements: str
+ vpc_id:
+ description:
+ - VPC ID, used when resolving security group names to IDs.
+ type: str
+ instance_id:
+ description:
+ - The Id of a running instance to use as a basis for a launch configuration. Can be used in place of I(image_id) and I(instance_type).
+ type: str
+ placement_tenancy:
+ description:
+ - Determines whether the instance runs on single-tenant hardware or not.
+ - When not set AWS will default to C(default).
+ type: str
+ choices: ['default', 'dedicated']
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+
+- name: create a launch configuration with an encrypted volume
+ community.aws.autoscaling_launch_config:
+ name: special
+ image_id: ami-XXX
+ key_name: default
+ security_groups: ['group', 'group2' ]
+ instance_type: t1.micro
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 100
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+ encrypted: true
+ - device_name: /dev/sdb
+ ephemeral: ephemeral0
+
+- name: create a launch configuration using a running instance id as a basis
+ community.aws.autoscaling_launch_config:
+ name: special
+ instance_id: i-00a48b207ec59e948
+ key_name: default
+ security_groups: ['launch-wizard-2' ]
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 120
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+
+- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
+ community.aws.autoscaling_launch_config:
+ name: special
+ image_id: ami-XXX
+ key_name: default
+ security_groups: ['group', 'group2' ]
+ instance_type: t1.micro
+ volumes:
+ - device_name: /dev/sdf
+ no_device: true
+
+- name: Use EBS snapshot ID for volume
+ block:
+ - name: Set Volume Facts
+ ansible.builtin.set_fact:
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 20
+ ebs:
+ snapshot: snap-XXXX
+ volume_type: gp2
+ delete_on_termination: true
+ encrypted: false
+
+ - name: Create launch configuration
+ community.aws.autoscaling_launch_config:
+ name: lc1
+ image_id: ami-xxxx
+ assign_public_ip: true
+ instance_type: t2.medium
+ key_name: my-key
+ security_groups: "['sg-xxxx']"
+ volumes: "{{ volumes }}"
+ register: lc_info
+'''
+
+RETURN = r'''
+arn:
+ description: The Amazon Resource Name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+changed:
+ description: Whether the state of the launch configuration has changed.
+ returned: always
+ type: bool
+ sample: false
+created_time:
+ description: The creation date and time for the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: '2017-11-03 23:46:44.841000'
+image_id:
+ description: The ID of the Amazon Machine Image used by the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: ami-9be6f38c
+instance_type:
+ description: The instance type for the instances.
+ returned: when I(state=present)
+ type: str
+ sample: t1.micro
+name:
+ description: The name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: launch_config_name
+result:
+ description: The specification details for the launch configuration.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ PlacementTenancy:
+ description: The tenancy of the instances, either default or dedicated.
+ returned: when I(state=present)
+ type: str
+ sample: default
+ associate_public_ip_address:
+ description: (EC2-VPC) Indicates whether to assign a public IP address to each instance.
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ block_device_mappings:
+ description: A block device mapping, which specifies the block devices.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
+ returned: when I(state=present)
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: The information about the Amazon EBS volume.
+ returned: when I(state=present)
+ type: complex
+ contains:
+ snapshot_id:
+ description: The ID of the snapshot.
+ returned: when I(state=present)
+ type: str
+ volume_size:
+ description: The volume size, in GiB.
+ returned: when I(state=present)
+ type: str
+ sample: '100'
+ virtual_name:
+ description: The name of the virtual device (for example, ephemeral0).
+ returned: when I(state=present)
+ type: str
+ sample: ephemeral0
+ classic_link_vpc_id:
+ description: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
+ returned: when I(state=present)
+ type: str
+ classic_link_vpc_security_groups:
+ description: The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
+ returned: when I(state=present)
+ type: list
+ sample: []
+ created_time:
+ description: The creation date and time for the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: '2017-11-03 23:46:44.841000'
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: when I(state=present)
+ type: bool
+ sample: true
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O C(true) or not C(false).
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ image_id:
+ description: The ID of the Amazon Machine Image used by the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: ami-9be6f38c
+ instance_monitoring:
+ description: Indicates whether instances in this group are launched with detailed C(true) or basic C(false) monitoring.
+ returned: when I(state=present)
+ type: bool
+ sample: true
+ instance_profile_name:
+ description: The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.
+ returned: when I(state=present)
+ type: str
+ sample: null
+ instance_type:
+ description: The instance type for the instances.
+ returned: when I(state=present)
+ type: str
+ sample: t1.micro
+ iops:
+ description: The number of I/O operations per second (IOPS) to provision for the volume.
+ returned: when I(state=present)
+ type: int
+ kernel_id:
+ description: The ID of the kernel associated with the AMI.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ key_name:
+ description: The name of the key pair.
+ returned: when I(state=present)
+ type: str
+ sample: testkey
+ launch_configuration_arn:
+ description: The Amazon Resource Name (ARN) of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+ member:
+ description: ""
+ returned: when I(state=present)
+ type: str
+ sample: "\n "
+ name:
+ description: The name of the launch configuration.
+ returned: when I(state=present)
+ type: str
+ sample: launch_config_name
+ ramdisk_id:
+ description: The ID of the RAM disk associated with the AMI.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ security_groups:
+ description: The security groups to associate with the instances.
+ returned: when I(state=present)
+ type: list
+ sample:
+ - sg-5e27db2f
+ spot_price:
+ description: The price to bid when launching Spot Instances.
+ returned: when I(state=present)
+ type: float
+ use_block_device_types:
+ description: Indicates whether to suppress a device mapping.
+ returned: when I(state=present)
+ type: bool
+ sample: false
+ user_data:
+ description: The user data available to the instances.
+ returned: when I(state=present)
+ type: str
+ sample: ''
+ volume_type:
+ description: The volume type (one of standard, io1, gp2).
+ returned: when I(state=present)
+ type: str
+ sample: io1
+security_groups:
+ description: The security groups to associate with the instances.
+ returned: when I(state=present)
+ type: list
+ sample:
+ - sg-5e27db2f
+'''
+
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+
+
+def create_block_device_meta(module, volume):
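+    # Translate a single volume spec from the module's parameters into the
+    # BlockDeviceMappings entry expected by create_launch_configuration,
+    # validating required and mutually exclusive combinations first.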
+ if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume.get('volume_type') == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+
+ return_object = {}
+
+ if 'ephemeral' in volume:
+ return_object['VirtualName'] = volume.get('ephemeral')
+
+ if 'device_name' in volume:
+ return_object['DeviceName'] = volume.get('device_name')
+
+ if 'no_device' in volume:
+ return_object['NoDevice'] = volume.get('no_device')
+
+ if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']):
+ return_object['Ebs'] = {}
+
+ if 'snapshot' in volume:
+ return_object['Ebs']['SnapshotId'] = volume.get('snapshot')
+
+ if 'volume_size' in volume:
+ return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0))
+
+ if 'volume_type' in volume:
+ return_object['Ebs']['VolumeType'] = volume.get('volume_type')
+
+ if 'delete_on_termination' in volume:
+ return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False)
+
+ if 'iops' in volume:
+ return_object['Ebs']['Iops'] = volume.get('iops')
+
+ if 'throughput' in volume:
+ if volume.get('volume_type') != 'gp3':
+ module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.')
+ return_object['Ebs']['Throughput'] = volume.get('throughput')
+
+ if 'encrypted' in volume:
+ return_object['Ebs']['Encrypted'] = volume.get('encrypted')
+
+ return return_object
+
+
+def create_launch_config(connection, module):
+ name = module.params.get('name')
+ vpc_id = module.params.get('vpc_id')
+ try:
+ ec2_connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+ try:
+ security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to get Security Group IDs')
+ except ValueError as e:
+ module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc())
+ user_data = module.params.get('user_data')
+ user_data_path = module.params.get('user_data_path')
+ volumes = module.params['volumes']
+ instance_monitoring = module.params.get('instance_monitoring')
+ assign_public_ip = module.params.get('assign_public_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ ebs_optimized = module.params.get('ebs_optimized')
+ classic_link_vpc_id = module.params.get('classic_link_vpc_id')
+ classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
+
+ block_device_mapping = []
+
+    convert_list = ['image_id', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price']
+
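+    # Straight pass-through parameters: capitalize() turns e.g. 'image_id' into
+    # 'Image_id', which snake_dict_to_camel_dict then folds into the CamelCase
+    # key ('ImageId') expected by the boto3 API.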
+ launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)))
+
+ if user_data_path:
+ try:
+ with open(user_data_path, 'r') as user_data_file:
+ user_data = user_data_file.read()
+ except IOError as e:
+ module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc())
+
+ if volumes:
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+ # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ block_device_mapping.append(create_block_device_meta(module, volume))
+
+ try:
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe launch configuration by name")
+
+ changed = False
+ result = {}
+
+ launch_config['LaunchConfigurationName'] = name
+
+ if security_groups is not None:
+ launch_config['SecurityGroups'] = security_groups
+
+ if classic_link_vpc_id is not None:
+ launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
+
+ if instance_monitoring is not None:
+ launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
+
+ if classic_link_vpc_security_groups is not None:
+ launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
+
+ if block_device_mapping:
+ launch_config['BlockDeviceMappings'] = block_device_mapping
+
+ if instance_profile_name is not None:
+ launch_config['IamInstanceProfile'] = instance_profile_name
+
+ if assign_public_ip is not None:
+ launch_config['AssociatePublicIpAddress'] = assign_public_ip
+
+ if user_data is not None:
+ launch_config['UserData'] = user_data
+
+ if ebs_optimized is not None:
+ launch_config['EbsOptimized'] = ebs_optimized
+
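+    # Launch configurations are immutable once created (see notes above), so
+    # only create one when no configuration with this name exists yet.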
+    if not launch_configs:
+ try:
+ connection.create_launch_configuration(**launch_config)
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ changed = True
+ if launch_configs:
+ launch_config = launch_configs[0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create launch configuration")
+
+ result = (dict((k, v) for k, v in launch_config.items()
+ if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
+
+ result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
+
+ try:
+ result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled'))
+ except AttributeError:
+ result['InstanceMonitoring'] = False
+
+ result['BlockDeviceMappings'] = []
+
+ for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
+ result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName')))
+ if block_device_mapping.get('Ebs') is not None:
+ result['BlockDeviceMappings'][-1]['ebs'] = dict(
+ snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))
+
+ if user_data_path:
+ result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal
+
+ return_object = {
+ 'Name': result.get('LaunchConfigurationName'),
+ 'CreatedTime': result.get('CreatedTime'),
+ 'ImageId': result.get('ImageId'),
+ 'Arn': result.get('LaunchConfigurationARN'),
+ 'SecurityGroups': result.get('SecurityGroups'),
+ 'InstanceType': result.get('InstanceType'),
+ 'Result': result
+ }
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
+
+
+def delete_launch_config(connection, module):
+ try:
+ name = module.params.get('name')
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ if launch_configs:
+ connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName'))
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete launch configuration")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ image_id=dict(),
+ instance_id=dict(),
+ key_name=dict(),
+ security_groups=dict(default=[], type='list', elements='str'),
+ user_data=dict(),
+ user_data_path=dict(type='path'),
+ kernel_id=dict(),
+ volumes=dict(type='list', elements='dict'),
+ instance_type=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ spot_price=dict(type='float'),
+ ramdisk_id=dict(),
+ instance_profile_name=dict(),
+ ebs_optimized=dict(default=False, type='bool'),
+ instance_monitoring=dict(default=False, type='bool'),
+ assign_public_ip=dict(type='bool'),
+ classic_link_vpc_security_groups=dict(type='list', elements='str'),
+ classic_link_vpc_id=dict(),
+ vpc_id=dict(),
+ placement_tenancy=dict(choices=['default', 'dedicated'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['user_data', 'user_data_path']],
+ )
+
+ try:
+ connection = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="unable to establish connection")
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_launch_config(connection, module)
+ elif state == 'absent':
+ delete_launch_config(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py
new file mode 100644
index 000000000..ae8f187c0
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto <jose@armesto.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: autoscaling_launch_config_find
+version_added: 1.0.0
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+ - Returns list of matching Launch Configurations for a given name, along with other useful information.
+ - Results can be sorted and sliced.
+ - Based on the work by Tom Bamford U(https://github.com/tombamford).
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_find).
+ The usage did not change.
+author:
+ - "Jose Armesto (@fiunchinho)"
+options:
+ name_regex:
+ description:
+ - A regular expression matched against Launch Configuration names.
+ - It will be compiled as a Python regex.
+ required: True
+ type: str
+ sort_order:
+ description:
+ - Order in which to sort results.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ type: str
+ limit:
+ description:
+ - How many results to show.
+ - Corresponds to Python slice notation like list[:limit].
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.ec2
+ - amazon.aws.aws
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Search for the Launch Configurations that start with "app"
+ community.aws.autoscaling_launch_config_find:
+ name_regex: app.*
+ sort_order: descending
+ limit: 2
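+
+# The matches are returned in a ``results`` list sorted by name; registering
+# the output makes the first match available to later tasks (illustrative).
+- name: Register the highest-sorted Launch Configuration starting with "app"
+ community.aws.autoscaling_launch_config_find:
+ name_regex: app.*
+ sort_order: descending
+ limit: 1
+ register: lc_find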
+'''
+
+RETURN = '''
+image_id:
+ description: AMI id
+ returned: when Launch Configuration was found
+ type: str
+ sample: "ami-0d75df7e"
+user_data:
+ description: User data used to start instance
+ returned: when Launch Configuration was found
+ type: str
+ sample: "ZXhwb3J0IENMT1VE"
+name:
+ description: Name of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: str
+ sample: "myapp-v123"
+arn:
+ description: ARN of the Launch Configuration
+ returned: when Launch Configuration was found
+ type: str
+ sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+ description: Type of ec2 instance
+ returned: when Launch Configuration was found
+ type: str
+ sample: "t2.small"
+created_time:
+ description: When it was created
+ returned: when Launch Configuration was found
+ type: str
+ sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+ description: Launch Configuration EBS optimized property
+ returned: when Launch Configuration was found
+ type: bool
+ sample: False
+instance_monitoring:
+ description: Launch Configuration instance monitoring property
+ returned: when Launch Configuration was found
+ type: dict
+ sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+ description: Launch Configuration classic link vpc security groups property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+block_device_mappings:
+ description: Launch Configuration block device mappings property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+keyname:
+ description: Launch Configuration ssh key
+ returned: when Launch Configuration was found
+ type: str
+ sample: mykey
+security_groups:
+ description: Launch Configuration security groups
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+kernel_id:
+ description: Launch Configuration kernel to use
+ returned: when Launch Configuration was found
+ type: str
+ sample: ''
+ram_disk_id:
+ description: Launch Configuration ram disk property
+ returned: when Launch Configuration was found
+ type: str
+ sample: ''
+associate_public_address:
+ description: Assign public address or not
+ returned: when Launch Configuration was found
+ type: bool
+ sample: True
+...
+'''
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def find_launch_configs(client, module):
+ name_regex = module.params.get('name_regex')
+ sort_order = module.params.get('sort_order')
+ limit = module.params.get('limit')
+
+ paginator = client.get_paginator('describe_launch_configurations')
+
+ response_iterator = paginator.paginate(
+ PaginationConfig={
+ 'MaxItems': 1000,
+ 'PageSize': 100
+ }
+ )
+
+ results = []
+
+ # Compile the pattern once rather than on every page of results
+ matcher = re.compile(name_regex)
+
+ for response in response_iterator:
+ response['LaunchConfigurations'] = filter(lambda lc: matcher.match(lc['LaunchConfigurationName']),
+ response['LaunchConfigurations'])
+
+ for lc in response['LaunchConfigurations']:
+ data = {
+ 'name': lc['LaunchConfigurationName'],
+ 'arn': lc['LaunchConfigurationARN'],
+ 'created_time': lc['CreatedTime'],
+ 'user_data': lc['UserData'],
+ 'instance_type': lc['InstanceType'],
+ 'image_id': lc['ImageId'],
+ 'ebs_optimized': lc['EbsOptimized'],
+ 'instance_monitoring': lc['InstanceMonitoring'],
+ 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
+ 'block_device_mappings': lc['BlockDeviceMappings'],
+ 'keyname': lc['KeyName'],
+ 'security_groups': lc['SecurityGroups'],
+ 'kernel_id': lc['KernelId'],
+ 'ram_disk_id': lc['RamdiskId'],
+ 'associate_public_address': lc.get('AssociatePublicIpAddress', False),
+ }
+
+ results.append(data)
+
+ results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+
+ if limit:
+ results = results[:limit]
+
+ module.exit_json(changed=False, results=results)
+
+
+def main():
+ argument_spec = dict(
+ name_regex=dict(required=True),
+ sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
+ limit=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ try:
+ client = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ find_launch_configs(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py
new file mode 100644
index 000000000..1c98d7588
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: autoscaling_launch_config_info
+version_added: 1.0.0
+short_description: Gather information about AWS Autoscaling Launch Configurations
+description:
+ - Gather information about AWS Autoscaling Launch Configurations.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_info).
+ The usage did not change.
+author:
+ - "Loïc Latreille (@psykotox)"
+options:
+ name:
+ description:
+ - A name or a list of names to match.
+ default: []
+ type: list
+ elements: str
+ sort:
+ description:
+ - Optional attribute with which to sort the results.
+ choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+ type: str
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ type: str
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all launch configurations
+ community.aws.autoscaling_launch_config_info:
+
+- name: Gather information about launch configuration with name "example"
+ community.aws.autoscaling_launch_config_info:
+ name: example
+
+- name: Gather information sorted by created_time from most recent to least recent
+ community.aws.autoscaling_launch_config_info:
+ sort: created_time
+ sort_order: descending
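+
+# sort_start and sort_end slice the sorted results using Python slice
+# semantics; the values below are illustrative.
+- name: Gather information about the three oldest launch configurations
+ community.aws.autoscaling_launch_config_info:
+ sort: created_time
+ sort_start: 0
+ sort_end: 3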
+'''
+
+RETURN = r'''
+block_device_mapping:
+ description: Block device mapping for the instances of launch configuration.
+ type: list
+ returned: always
+ sample: "[{
+ 'device_name': '/dev/xvda':,
+ 'ebs': {
+ 'delete_on_termination': true,
+ 'volume_size': 8,
+ 'volume_type': 'gp2'
+ }]"
+classic_link_vpc_security_groups:
+ description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id.
+ type: list
+ returned: always
+ sample: []
+created_time:
+ description: The creation date and time for the launch configuration.
+ type: str
+ returned: always
+ sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+ description: EBS I/O optimized C(true) or not C(false).
+ type: bool
+ returned: always
+ sample: true
+image_id:
+ description: ID of the Amazon Machine Image (AMI).
+ type: str
+ returned: always
+ sample: "ami-12345678"
+instance_monitoring:
+ description: Launched with detailed monitoring or not.
+ type: dict
+ returned: always
+ sample: "{
+ 'enabled': true
+ }"
+instance_type:
+ description: Instance type.
+ type: str
+ returned: always
+ sample: "t2.micro"
+kernel_id:
+ description: ID of the kernel associated with the AMI.
+ type: str
+ returned: always
+ sample: ''
+key_name:
+ description: Name of the key pair.
+ type: str
+ returned: always
+ sample: "user_app"
+launch_configuration_arn:
+ description: Amazon Resource Name (ARN) of the launch configuration.
+ type: str
+ returned: always
+ sample: "arn:aws:autoscaling:us-east-1:123456798012:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+ description: Name of the launch configuration.
+ type: str
+ returned: always
+ sample: "lc-app"
+ramdisk_id:
+ description: ID of the RAM disk associated with the AMI.
+ type: str
+ returned: always
+ sample: ''
+security_groups:
+ description: Security groups associated with the launch configuration.
+ type: list
+ returned: always
+ sample: "[
+ 'web'
+ ]"
+user_data:
+ description: User data available.
+ type: str
+ returned: always
+'''
+
+try:
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def list_launch_configs(connection, module):
+
+ launch_config_name = module.params.get("name")
+ sort = module.params.get('sort')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+
+ try:
+ pg = connection.get_paginator('describe_launch_configurations')
+ launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to list launch configs")
+
+ snaked_launch_configs = []
+ for launch_config in launch_configs['LaunchConfigurations']:
+ snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
+
+ for launch_config in snaked_launch_configs:
+ # Keys were snake_cased above, so the timestamp now lives in 'created_time'
+ if 'created_time' in launch_config:
+ launch_config['created_time'] = str(launch_config['created_time'])
+
+ if sort:
+ snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
+
+ # Explicit None checks so a start/end index of 0 is honoured
+ if sort and sort_start is not None and sort_end is not None:
+ snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
+ elif sort and sort_start is not None:
+ snaked_launch_configs = snaked_launch_configs[sort_start:]
+ elif sort and sort_end is not None:
+ snaked_launch_configs = snaked_launch_configs[:sort_end]
+
+ module.exit_json(launch_configurations=snaked_launch_configs)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False, default=[], type='list', elements='str'),
+ sort=dict(required=False, default=None,
+ choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+ sort_order=dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start=dict(required=False, type='int'),
+ sort_end=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client('autoscaling')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_launch_configs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py
new file mode 100644
index 000000000..cf07b7681
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: autoscaling_lifecycle_hook
+version_added: 1.0.0
+short_description: Create, delete or update AWS ASG Lifecycle Hooks
+description:
+ - Will create a new hook when I(state=present) and no given Hook is found.
+ - Will update an existing hook when I(state=present) and a Hook is found, but current and provided parameters differ.
+ - Will delete the hook when I(state=absent) and a Hook is found.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_lifecycle_hook).
+ The usage did not change.
+author:
+ - Igor 'Tsigankov' Eyrich (@tsiganenok) <tsiganenok@gmail.com>
+options:
+ state:
+ description:
+ - Create or delete Lifecycle Hook.
+ - When I(state=present) updates existing hook or creates a new hook if not found.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ lifecycle_hook_name:
+ description:
+ - The name of the lifecycle hook.
+ required: true
+ type: str
+ autoscaling_group_name:
+ description:
+ - The name of the Auto Scaling group to which you want to assign the lifecycle hook.
+ required: true
+ type: str
+ transition:
+ description:
+ - The instance state to which you want to attach the lifecycle hook.
+ - Required when I(state=present).
+ choices: ['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']
+ type: str
+ role_arn:
+ description:
+ - The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
+ type: str
+ notification_target_arn:
+ description:
+ - The ARN of the notification target that Auto Scaling will use to notify you when an
+ instance is in the transition state for the lifecycle hook.
+ - This target can be either an SQS queue or an SNS topic.
+ - If you specify an empty string, this overrides the current ARN.
+ type: str
+ notification_meta_data:
+ description:
+ - Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.
+ type: str
+ heartbeat_timeout:
+ description:
+ - The amount of time, in seconds, that can elapse before the lifecycle hook times out.
+ When the lifecycle hook times out, Auto Scaling performs the default action.
+ You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
+ - By default Amazon AWS will use C(3600) (1 hour).
+ type: int
+ default_result:
+ description:
+ - Defines the action the Auto Scaling group should take when the lifecycle hook timeout
+ elapses or if an unexpected failure occurs.
+ choices: ['ABANDON', 'CONTINUE']
+ default: ABANDON
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create / Update lifecycle hook
+ community.aws.autoscaling_lifecycle_hook:
+ region: eu-central-1
+ state: present
+ autoscaling_group_name: example
+ lifecycle_hook_name: example
+ transition: autoscaling:EC2_INSTANCE_LAUNCHING
+ heartbeat_timeout: 7000
+ default_result: ABANDON
+
+- name: Delete lifecycle hook
+ community.aws.autoscaling_lifecycle_hook:
+ region: eu-central-1
+ state: absent
+ autoscaling_group_name: example
+ lifecycle_hook_name: example
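+
+# The module supports check mode, so a change can be previewed without
+# creating or deleting anything.
+- name: Preview lifecycle hook creation
+ community.aws.autoscaling_lifecycle_hook:
+ region: eu-central-1
+ state: present
+ autoscaling_group_name: example
+ lifecycle_hook_name: example
+ transition: autoscaling:EC2_INSTANCE_LAUNCHING
+ check_mode: true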
+'''
+
+RETURN = '''
+---
+auto_scaling_group_name:
+ description: The unique name of the auto scaling group.
+ returned: success
+ type: str
+ sample: "myasg"
+default_result:
+ description: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs.
+ returned: success
+ type: str
+ sample: CONTINUE
+global_timeout:
+ description: The maximum time, in seconds, that an instance can remain in a C(Pending:Wait) or C(Terminating:Wait) state.
+ returned: success
+ type: int
+ sample: 172800
+heartbeat_timeout:
+ description: The maximum time, in seconds, that can elapse before the lifecycle hook times out.
+ returned: success
+ type: int
+ sample: 3600
+lifecycle_hook_name:
+ description: The name of the lifecycle hook.
+ returned: success
+ type: str
+ sample: "mylifecyclehook"
+lifecycle_transition:
+ description: The instance state to which lifecycle hook should be attached.
+ returned: success
+ type: str
+ sample: "autoscaling:EC2_INSTANCE_LAUNCHING"
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+def create_lifecycle_hook(connection, module):
+
+ lch_name = module.params.get('lifecycle_hook_name')
+ asg_name = module.params.get('autoscaling_group_name')
+ transition = module.params.get('transition')
+ role_arn = module.params.get('role_arn')
+ notification_target_arn = module.params.get('notification_target_arn')
+ notification_meta_data = module.params.get('notification_meta_data')
+ heartbeat_timeout = module.params.get('heartbeat_timeout')
+ default_result = module.params.get('default_result')
+
+ return_object = {}
+ return_object['changed'] = False
+
+ lch_params = {
+ 'LifecycleHookName': lch_name,
+ 'AutoScalingGroupName': asg_name,
+ 'LifecycleTransition': transition
+ }
+
+ if role_arn:
+ lch_params['RoleARN'] = role_arn
+
+ if notification_target_arn:
+ lch_params['NotificationTargetARN'] = notification_target_arn
+
+ if notification_meta_data:
+ lch_params['NotificationMetadata'] = notification_meta_data
+
+ if heartbeat_timeout:
+ lch_params['HeartbeatTimeout'] = heartbeat_timeout
+
+ if default_result:
+ lch_params['DefaultResult'] = default_result
+
+ try:
+ existing_hook = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name,
+ LifecycleHookNames=[lch_name]
+ )['LifecycleHooks']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get Lifecycle Hook")
+
+ if not existing_hook:
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.")
+ return_object['changed'] = True
+ connection.put_lifecycle_hook(**lch_params)
+ return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create LifecycleHook")
+
+ else:
+ added, removed, modified, same = dict_compare(lch_params, existing_hook[0])
+ if modified:
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.")
+ return_object['changed'] = True
+ connection.put_lifecycle_hook(**lch_params)
+ return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create LifecycleHook")
+
+ module.exit_json(**camel_dict_to_snake_dict(return_object))
+
+
+def dict_compare(d1, d2):
+ # Compare the requested hook parameters (d1) with the existing hook (d2)
+ d1_keys = set(d1.keys())
+ d2_keys = set(d2.keys())
+ intersect_keys = d1_keys.intersection(d2_keys)
+ added = d1_keys - d2_keys
+ removed = d2_keys - d1_keys
+ modified = False
+ for key in d1:
+ # Use .get() so a parameter missing from the existing hook counts as
+ # a modification instead of raising a KeyError
+ if d1[key] != d2.get(key):
+ modified = True
+ break
+
+ same = set(o for o in intersect_keys if d1[o] == d2[o])
+ return added, removed, modified, same
+
+
+def delete_lifecycle_hook(connection, module):
+
+ lch_name = module.params.get('lifecycle_hook_name')
+ asg_name = module.params.get('autoscaling_group_name')
+
+ return_object = {}
+ return_object['changed'] = False
+
+ try:
+ all_hooks = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks")
+
+ for hook in all_hooks['LifecycleHooks']:
+ if hook['LifecycleHookName'] == lch_name:
+ lch_params = {
+ 'LifecycleHookName': lch_name,
+ 'AutoScalingGroupName': asg_name
+ }
+
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.")
+ connection.delete_lifecycle_hook(**lch_params)
+ return_object['changed'] = True
+ return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete LifecycleHook")
+
+ module.exit_json(**camel_dict_to_snake_dict(return_object))
+
+
+def main():
+ argument_spec = dict(
+ autoscaling_group_name=dict(required=True, type='str'),
+ lifecycle_hook_name=dict(required=True, type='str'),
+ transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']),
+ role_arn=dict(type='str'),
+ notification_target_arn=dict(type='str'),
+ notification_meta_data=dict(type='str'),
+ heartbeat_timeout=dict(type='int'),
+ default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['transition']]],
+ )
+
+ state = module.params.get('state')
+
+ connection = module.client('autoscaling')
+
+ if state == 'present':
+ create_lifecycle_hook(connection, module)
+ elif state == 'absent':
+ delete_lifecycle_hook(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py
new file mode 100644
index 000000000..a29389b0e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: autoscaling_policy
+short_description: Create or delete AWS scaling policies for Autoscaling groups
+version_added: 1.0.0
+description:
+ - Can create or delete scaling policies for autoscaling groups.
+ - Referenced autoscaling groups must already exist.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_scaling_policy).
+ The usage did not change.
+author:
+ - Zacharie Eakin (@zeekin)
+ - Will Thames (@willthames)
+options:
+ state:
+ type: str
+ description:
+ - Register or deregister the policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Unique name for the scaling policy.
+ required: true
+ asg_name:
+ type: str
+ description:
+ - Name of the associated autoscaling group.
+ - Required if I(state) is C(present).
+ adjustment_type:
+ type: str
+ description:
+ - The type of change in capacity of the autoscaling group.
+ - Required if I(state) is C(present).
+ choices:
+ - ChangeInCapacity
+ - ExactCapacity
+ - PercentChangeInCapacity
+ scaling_adjustment:
+ type: int
+ description:
+ - The amount by which the autoscaling group is adjusted by the policy.
+ - A negative number has the effect of scaling down the ASG.
+ - Units are numbers of instances for C(ExactCapacity) or C(ChangeInCapacity) or percent
+ of existing instances for C(PercentChangeInCapacity).
+ - Required when I(policy_type) is C(SimpleScaling).
+ min_adjustment_step:
+ type: int
+ description:
+ - Minimum amount of adjustment when policy is triggered.
+ - Only used when I(adjustment_type) is C(PercentChangeInCapacity).
+ cooldown:
+ type: int
+ description:
+ - The minimum period of time (in seconds) between which autoscaling actions can take place.
+ - Only used when I(policy_type) is C(SimpleScaling).
+ policy_type:
+ type: str
+ description:
+ - Auto scaling adjustment policy.
+ choices:
+ - StepScaling
+ - SimpleScaling
+ - TargetTrackingScaling
+ default: SimpleScaling
+ metric_aggregation:
+ type: str
+ description:
+ - The aggregation type for the CloudWatch metrics.
+ - Only used when I(policy_type) is not C(SimpleScaling).
+ choices:
+ - Minimum
+ - Maximum
+ - Average
+ default: Average
+ step_adjustments:
+ type: list
+ description:
+ - List of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment).
+ - Intervals must not overlap or have a gap between them.
+ - At most, one item can have an undefined I(lower_bound).
+ If any item has a negative lower_bound, then there must be a step adjustment with an undefined I(lower_bound).
+ - At most, one item can have an undefined I(upper_bound).
+ If any item has a positive upper_bound, then there must be a step adjustment with an undefined I(upper_bound).
+ - The bounds are the amount over the alarm threshold at which the adjustment will trigger.
+ This means that for an alarm threshold of 50, triggering at 75 requires a lower bound of 25.
+ See U(http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_StepAdjustment.html).
+ elements: dict
+ suboptions:
+ lower_bound:
+ type: int
+ description:
+ - The lower bound for the difference between the alarm threshold and
+ the CloudWatch metric.
+ upper_bound:
+ type: int
+ description:
+ - The upper bound for the difference between the alarm threshold and
+ the CloudWatch metric.
+ scaling_adjustment:
+ type: int
+ description:
+ - The amount by which to scale.
+ required: true
+ target_tracking_config:
+ type: dict
+ description:
+ - Allows you to specify a I(target_tracking_config) for autoscaling policies in AWS.
+ - I(target_tracking_config) can accept nested dicts for I(customized_metric_spec) or I(predefined_metric_spec).
+ Each specification aligns with their boto3 equivalent.
+ - Required when I(policy_type) is C(TargetTrackingScaling).
+ version_added: 4.1.0
+ suboptions:
+ customized_metric_spec:
+ type: dict
+ description:
+ - A dict to be passed in as the C(CustomizedMetricSpecification) of the C(TargetTrackingConfiguration).
+ suboptions:
+ metric_name:
+ type: str
+ description:
+ - The name of the metric.
+ required: true
+ namespace:
+ type: str
+ description:
+ - The namespace of the metric.
+ required: true
+ statistic:
+ type: str
+ description:
+ - The statistic of the metric.
+ required: true
+ choices:
+ - Average
+ - Minimum
+ - Maximum
+ - SampleCount
+ - Sum
+ dimensions:
+ type: list
+ description:
+ - The dimensions of the metric. The element of the list should be a dict.
+ elements: dict
+ unit:
+ type: str
+ description:
+ - The unit of the metric. Reference AmazonCloudWatch API for valid Units.
+ predefined_metric_spec:
+ type: dict
+ description:
+ - A dict to be passed in as the C(PredefinedMetricSpecification) of the C(TargetTrackingConfiguration).
+ suboptions:
+ predefined_metric_type:
+ type: str
+ required: true
+ description:
+ - Required if C(predefined_metric_spec) is used.
+ choices:
+ - ASGAverageCPUUtilization
+ - ASGAverageNetworkIn
+ - ASGAverageNetworkOut
+ - ALBRequestCountPerTarget
+ resource_label:
+ type: str
+ description:
+ - Uniquely identifies a specific ALB target group from which to determine the average request count served by your Auto Scaling group.
+ - You can't specify a resource label unless the target group is attached to the Auto Scaling group.
+ target_value:
+ type: float
+ description:
+ - Specify a float number for target utilization.
+ - Required when I(target_tracking_config) is specified.
+ required: true
+ disable_scalein:
+ type: bool
+ description:
+ - Indicate whether scaling in by the target tracking scaling policy is disabled.
+ estimated_instance_warmup:
+ type: int
+ description:
+ - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+EXAMPLES = '''
+- name: Simple Scale Down policy
+ community.aws.autoscaling_policy:
+ state: present
+ region: US-XXX
+ name: "scaledown-policy"
+ adjustment_type: "ChangeInCapacity"
+ asg_name: "application-asg"
+ scaling_adjustment: -1
+ min_adjustment_step: 1
+ cooldown: 300
+
+# For an alarm with a breach threshold of 20, the
+# following creates a stepped policy:
+# From 20-40 (0-20 above threshold), increase by 50% of existing capacity
+# Above 40 (more than 20 over threshold), increase by 100% of existing capacity
+- community.aws.autoscaling_policy:
+ state: present
+ region: US-XXX
+ name: "step-scale-up-policy"
+ policy_type: StepScaling
+ metric_aggregation: Maximum
+ step_adjustments:
+ - upper_bound: 20
+ scaling_adjustment: 50
+ - lower_bound: 20
+ scaling_adjustment: 100
+ adjustment_type: "PercentChangeInCapacity"
+ asg_name: "application-asg"
+
+- name: create TargetTracking predefined policy
+ community.aws.autoscaling_policy:
+ name: "predefined-policy-1"
+ policy_type: TargetTrackingScaling
+ target_tracking_config:
+ predefined_metric_spec:
+ predefined_metric_type: ASGAverageCPUUtilization
+ target_value: 98.0
+ asg_name: "asg-test-1"
+ register: result
+
+- name: create TargetTracking predefined policy with resource_label
+ community.aws.autoscaling_policy:
+ name: "predefined-policy-1"
+ policy_type: TargetTrackingScaling
+ target_tracking_config:
+ predefined_metric_spec:
+ predefined_metric_type: ALBRequestCountPerTarget
+ resource_label: app/my-alb/778d41231d141a0f/targetgroup/my-alb-target-group/942f017f100becff
+ target_value: 98.0
+ asg_name: "asg-test-1"
+ register: result
+
+- name: create TargetTrackingScaling custom policy
+ community.aws.autoscaling_policy:
+ name: "custom-policy-1"
+ policy_type: TargetTrackingScaling
+ target_tracking_config:
+ customized_metric_spec:
+ metric_name: metric_1
+ namespace: namespace_1
+ statistic: Minimum
+ unit: Gigabits
+ dimensions: [{'Name': 'dimension1', 'Value': 'value1'}]
+ disable_scalein: true
+ target_value: 98.0
+ asg_name: asg-test-1
+ register: result
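+
+# Deleting a policy only requires its name; asg_name is not needed when
+# state is absent.
+- name: Remove the scaling policy
+ community.aws.autoscaling_policy:
+ state: absent
+ name: "custom-policy-1"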
+'''
+
+RETURN = '''
+adjustment_type:
+ description: Scaling policy adjustment type.
+ returned: always
+ type: str
+ sample: PercentChangeInCapacity
+alarms:
+ description: Cloudwatch alarms related to the policy.
+ returned: always
+ type: complex
+ contains:
+ alarm_name:
+ description: Name of the Cloudwatch alarm.
+ returned: always
+ type: str
+ sample: cpu-very-high
+ alarm_arn:
+ description: ARN of the Cloudwatch alarm.
+ returned: always
+ type: str
+ sample: arn:aws:cloudwatch:us-east-2:1234567890:alarm:cpu-very-high
+arn:
+ description: ARN of the scaling policy. Provided for backward compatibility, value is the same as I(policy_arn).
+ returned: always
+ type: str
+ sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy
+as_name:
+ description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name).
+ returned: always
+ type: str
+ sample: app-asg
+auto_scaling_group_name:
+ description: Name of Auto Scaling Group.
+ returned: always
+ type: str
+ sample: app-asg
+metric_aggregation_type:
+ description: Method used to aggregate metrics.
+ returned: when I(policy_type) is C(StepScaling)
+ type: str
+ sample: Maximum
+name:
+ description: Name of the scaling policy. Provided for backward compatibility, value is the same as I(policy_name).
+ returned: always
+ type: str
+ sample: app-policy
+policy_arn:
+ description: ARN of scaling policy.
+ returned: always
+ type: str
+ sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy
+policy_name:
+ description: Name of scaling policy.
+ returned: always
+ type: str
+ sample: app-policy
+policy_type:
+ description: Type of auto scaling policy.
+ returned: always
+ type: str
+ sample: StepScaling
+scaling_adjustment:
+ description: Adjustment to make when alarm is triggered.
+ returned: When I(policy_type) is C(SimpleScaling)
+ type: int
+ sample: 1
+step_adjustments:
+ description: List of step adjustments.
+ returned: always
+ type: complex
+ contains:
+ metric_interval_lower_bound:
+ description: Lower bound for metric interval.
+ returned: if step has a lower bound
+ type: float
+ sample: 20.0
+ metric_interval_upper_bound:
+ description: Upper bound for metric interval.
+ returned: if step has an upper bound
+ type: float
+ sample: 40.0
+ scaling_adjustment:
+ description: Adjustment to make if this step is reached.
+ returned: always
+ type: int
+ sample: 50
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+def build_target_specification(target_tracking_config):
+
+ # Initialize an empty dict() for building TargetTrackingConfiguration policies,
+ # which will be returned
+ targetTrackingConfig = dict()
+
+ if target_tracking_config.get('target_value'):
+ targetTrackingConfig['TargetValue'] = target_tracking_config['target_value']
+
+ if target_tracking_config.get('disable_scalein'):
+ targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein']
+ else:
+ # Accounting for boto3 response
+ targetTrackingConfig['DisableScaleIn'] = False
+
+ if target_tracking_config['predefined_metric_spec'] is not None:
+ # Build spec for predefined_metric_spec
+ targetTrackingConfig['PredefinedMetricSpecification'] = dict()
+ if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'):
+ targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \
+ target_tracking_config['predefined_metric_spec']['predefined_metric_type']
+
+ if target_tracking_config['predefined_metric_spec'].get('resource_label'):
+ targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \
+ target_tracking_config['predefined_metric_spec']['resource_label']
+
+ elif target_tracking_config['customized_metric_spec'] is not None:
+ # Build spec for customized_metric_spec
+ targetTrackingConfig['CustomizedMetricSpecification'] = dict()
+ if target_tracking_config['customized_metric_spec'].get('metric_name'):
+ targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \
+ target_tracking_config['customized_metric_spec']['metric_name']
+
+ if target_tracking_config['customized_metric_spec'].get('namespace'):
+ targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \
+ target_tracking_config['customized_metric_spec']['namespace']
+
+ if target_tracking_config['customized_metric_spec'].get('dimensions'):
+ targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \
+ target_tracking_config['customized_metric_spec']['dimensions']
+
+ if target_tracking_config['customized_metric_spec'].get('statistic'):
+ targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \
+ target_tracking_config['customized_metric_spec']['statistic']
+
+ if target_tracking_config['customized_metric_spec'].get('unit'):
+ targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \
+ target_tracking_config['customized_metric_spec']['unit']
+
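+ # For illustration: {'target_value': 98.0, 'predefined_metric_spec':
+ # {'predefined_metric_type': 'ASGAverageCPUUtilization'}} yields
+ # {'TargetValue': 98.0, 'DisableScaleIn': False,
+ # 'PredefinedMetricSpecification': {'PredefinedMetricType': 'ASGAverageCPUUtilization'}}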
+ return targetTrackingConfig
+
+
+def create_scaling_policy(connection, module):
+ changed = False
+ asg_name = module.params['asg_name']
+ policy_type = module.params['policy_type']
+ policy_name = module.params['name']
+
+ if policy_type == 'TargetTrackingScaling':
+ params = dict(PolicyName=policy_name,
+ PolicyType=policy_type,
+ AutoScalingGroupName=asg_name)
+ else:
+ params = dict(PolicyName=policy_name,
+ PolicyType=policy_type,
+ AutoScalingGroupName=asg_name,
+ AdjustmentType=module.params['adjustment_type'])
+
+ # min_adjustment_step attribute is only relevant if the adjustment_type
+ # is set to percentage change in capacity, so it is a special case
+ if module.params['adjustment_type'] == 'PercentChangeInCapacity':
+ if module.params['min_adjustment_step']:
+ params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step']
+
+ if policy_type == 'SimpleScaling':
+ # can't use required_if because it doesn't allow multiple criteria -
+ # it's only required if policy is SimpleScaling and state is present
+ if not module.params['scaling_adjustment']:
+ module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling '
+ 'and state is present')
+ params['ScalingAdjustment'] = module.params['scaling_adjustment']
+ if module.params['cooldown']:
+ params['Cooldown'] = module.params['cooldown']
+
+ elif policy_type == 'StepScaling':
+ if not module.params['step_adjustments']:
+ module.fail_json(msg='step_adjustments is required when policy_type is StepScaling '
+ 'and state is present')
+ params['StepAdjustments'] = []
+ for step_adjustment in module.params['step_adjustments']:
+ step_adjust_params = dict(
+ ScalingAdjustment=step_adjustment['scaling_adjustment'])
+ # Explicit None checks: a bound of 0 is valid and must be sent
+ if step_adjustment.get('lower_bound') is not None:
+ step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound']
+ if step_adjustment.get('upper_bound') is not None:
+ step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound']
+ params['StepAdjustments'].append(step_adjust_params)
+ if module.params['metric_aggregation']:
+ params['MetricAggregationType'] = module.params['metric_aggregation']
+ if module.params['estimated_instance_warmup']:
+ params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup']
+
+ elif policy_type == 'TargetTrackingScaling':
+ if not module.params['target_tracking_config']:
+ module.fail_json(msg='target_tracking_config is required when policy_type is '
+ 'TargetTrackingScaling and state is present')
+ else:
+ params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config'))
+ if module.params['estimated_instance_warmup']:
+ params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup']
+
+ # Ensure idempotency with policies
+ try:
+ policies = connection.describe_policies(aws_retry=True,
+ AutoScalingGroupName=asg_name,
+ PolicyNames=[policy_name])['ScalingPolicies']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+ # Separate dicts: chained assignment would alias before and after
+ before = {}
+ after = {}
+ if not policies:
+ changed = True
+ else:
+ policy = policies[0]
+ for key in params:
+ if params[key] != policy.get(key):
+ changed = True
+ before[key] = policy.get(key)
+ after[key] = params[key]
+
+ if changed:
+ try:
+ connection.put_scaling_policy(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create autoscaling policy")
+
+ try:
+ policies = connection.describe_policies(aws_retry=True,
+ AutoScalingGroupName=asg_name,
+ PolicyNames=[policy_name])['ScalingPolicies']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+ policy = camel_dict_to_snake_dict(policies[0])
+ # Backward compatible return values
+ policy['arn'] = policy['policy_arn']
+ policy['as_name'] = policy['auto_scaling_group_name']
+ policy['name'] = policy['policy_name']
+
+ if before and after:
+ module.exit_json(changed=changed, diff=dict(
+ before=before, after=after), **policy)
+ else:
+ module.exit_json(changed=changed, **policy)
+
+
+def delete_scaling_policy(connection, module):
+ policy_name = module.params.get('name')
+
+ try:
+ policy = connection.describe_policies(
+ aws_retry=True, PolicyNames=[policy_name])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+
+ if policy['ScalingPolicies']:
+ try:
+ connection.delete_policy(aws_retry=True,
+ AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'],
+ PolicyName=policy_name)
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete autoscaling policy")
+
+ module.exit_json(changed=False)
+
+
+def main():
+ step_adjustment_spec = dict(
+ lower_bound=dict(type='int'),
+ upper_bound=dict(type='int'),
+ scaling_adjustment=dict(type='int', required=True)
+ )
+
+ predefined_metric_spec = dict(
+ predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization',
+ 'ASGAverageNetworkIn',
+ 'ASGAverageNetworkOut',
+ 'ALBRequestCountPerTarget'], required=True),
+ resource_label=dict(type='str')
+ )
+ customized_metric_spec = dict(
+ metric_name=dict(type='str', required=True),
+ namespace=dict(type='str', required=True),
+ statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']),
+ dimensions=dict(type='list', elements='dict'),
+ unit=dict(type='str')
+ )
+
+ target_tracking_spec = dict(
+ disable_scalein=dict(type='bool'),
+ target_value=dict(type='float', required=True),
+ predefined_metric_spec=dict(type='dict',
+ options=predefined_metric_spec),
+ customized_metric_spec=dict(type='dict',
+ options=customized_metric_spec)
+ )
+
+ argument_spec = dict(
+ name=dict(required=True),
+ adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
+ asg_name=dict(),
+ scaling_adjustment=dict(type='int'),
+ min_adjustment_step=dict(type='int'),
+ cooldown=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ metric_aggregation=dict(default='Average', choices=[
+ 'Minimum', 'Maximum', 'Average']),
+ policy_type=dict(default='SimpleScaling', choices=[
+ 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']),
+ target_tracking_config=dict(type='dict', options=target_tracking_spec),
+ step_adjustments=dict(
+ type='list', options=step_adjustment_spec, elements='dict'),
+ estimated_instance_warmup=dict(type='int')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['asg_name']]])
+
+ connection = module.client(
+ 'autoscaling', retry_decorator=AWSRetry.jittered_backoff())
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_scaling_policy(connection, module)
+ elif state == 'absent':
+ delete_scaling_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py
new file mode 100644
index 000000000..f1433c522
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py
+# (c) 2016, Mike Mochan <@mmochan>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: autoscaling_scheduled_action
+version_added: 2.2.0
+short_description: Create, modify and delete ASG scheduled scaling actions
+description:
+ - The module will create a new scheduled action when I(state=present) and no given action is found.
+ - The module will update an existing scheduled action when I(state=present) and the given action is found.
+ - The module will delete the scheduled action when I(state=absent) and the given action is found.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_scheduled_action).
+ The usage did not change.
+options:
+ autoscaling_group_name:
+ description:
+ - The name of the autoscaling group to add a scheduled action to.
+ type: str
+ required: true
+ scheduled_action_name:
+ description:
+ - The name of the scheduled action.
+ type: str
+ required: true
+ start_time:
+ description:
+ - Start time for the action.
+ type: str
+ end_time:
+ description:
+ - End time for the action.
+ type: str
+ time_zone:
+ description:
+ - Time zone to run against.
+ type: str
+ recurrence:
+ description:
+ - Cron style schedule to repeat the action on.
+ - Required when I(state=present).
+ type: str
+ min_size:
+ description:
+ - ASG min capacity.
+ type: int
+ max_size:
+ description:
+ - ASG max capacity.
+ type: int
+ desired_capacity:
+ description:
+ - ASG desired capacity.
+ type: int
+ state:
+ description:
+ - Create / update or delete scheduled action.
+ type: str
+ required: false
+ default: present
+ choices: ['present', 'absent']
+author: Mark Woolley (@marknet15)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Create a scheduled action for an autoscaling group.
+- name: Create a minimal scheduled action for autoscaling group
+ community.aws.autoscaling_scheduled_action:
+ region: eu-west-1
+ autoscaling_group_name: test_asg
+ scheduled_action_name: test_scheduled_action
+ start_time: 2021 October 25 08:00 UTC
+ recurrence: 40 22 * * 1-5
+ desired_capacity: 10
+ state: present
+ register: scheduled_action
+
+- name: Create a scheduled action for autoscaling group
+ community.aws.autoscaling_scheduled_action:
+ region: eu-west-1
+ autoscaling_group_name: test_asg
+ scheduled_action_name: test_scheduled_action
+ start_time: 2021 October 25 08:00 UTC
+ end_time: 2021 October 25 08:00 UTC
+ time_zone: Europe/London
+ recurrence: 40 22 * * 1-5
+ min_size: 10
+ max_size: 15
+ desired_capacity: 10
+ state: present
+ register: scheduled_action
+
+- name: Delete scheduled action
+ community.aws.autoscaling_scheduled_action:
+ region: eu-west-1
+ autoscaling_group_name: test_asg
+ scheduled_action_name: test_scheduled_action
+ state: absent
+'''
+
+RETURN = r'''
+scheduled_action_name:
+ description: The name of the scheduled action.
+ returned: when I(state=present)
+ type: str
+ sample: test_scheduled_action
+start_time:
+ description: Start time for the action.
+ returned: when I(state=present)
+ type: str
+ sample: '2021 October 25 08:00 UTC'
+end_time:
+ description: End time for the action.
+ returned: when I(state=present)
+ type: str
+ sample: '2021 October 25 08:00 UTC'
+time_zone:
+ description: The time zone used by the scheduled action.
+ returned: when I(state=present)
+ type: str
+ sample: Europe/London
+recurrence:
+ description: Cron style schedule to repeat the action on.
+ returned: when I(state=present)
+ type: str
+ sample: '40 22 * * 1-5'
+min_size:
+ description: ASG min capacity.
+ returned: when I(state=present)
+ type: int
+ sample: 1
+max_size:
+ description: ASG max capacity.
+ returned: when I(state=present)
+ type: int
+ sample: 2
+desired_capacity:
+ description: ASG desired capacity.
+ returned: when I(state=present)
+ type: int
+ sample: 1
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+try:
+ from dateutil.parser import parse as timedate_parse
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def format_request():
+ params = dict(
+ AutoScalingGroupName=module.params.get('autoscaling_group_name'),
+ ScheduledActionName=module.params.get('scheduled_action_name'),
+ Recurrence=module.params.get('recurrence')
+ )
+
+ # Some of these params are optional
+ if module.params.get('desired_capacity') is not None:
+ params['DesiredCapacity'] = module.params.get('desired_capacity')
+
+ if module.params.get('min_size') is not None:
+ params['MinSize'] = module.params.get('min_size')
+
+ if module.params.get('max_size') is not None:
+ params['MaxSize'] = module.params.get('max_size')
+
+ if module.params.get('time_zone') is not None:
+ params['TimeZone'] = module.params.get('time_zone')
+
+ if module.params.get('start_time') is not None:
+ params['StartTime'] = module.params.get('start_time')
+
+ if module.params.get('end_time') is not None:
+ params['EndTime'] = module.params.get('end_time')
+
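+ # For illustration: with autoscaling_group_name=test_asg,
+ # scheduled_action_name=test_scheduled_action, recurrence='40 22 * * 1-5'
+ # and desired_capacity=10, the returned dict is
+ # {'AutoScalingGroupName': 'test_asg',
+ # 'ScheduledActionName': 'test_scheduled_action',
+ # 'Recurrence': '40 22 * * 1-5',
+ # 'DesiredCapacity': 10}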
+ return params
+
+
+def delete_scheduled_action(current_actions):
+ if not current_actions:
+ return False
+
+ if module.check_mode:
+ return True
+
+ params = dict(
+ AutoScalingGroupName=module.params.get('autoscaling_group_name'),
+ ScheduledActionName=module.params.get('scheduled_action_name')
+ )
+
+ try:
+ client.delete_scheduled_action(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete scheduled action")
+
+ return True
+
+
+def get_scheduled_actions():
+ params = dict(
+ AutoScalingGroupName=module.params.get('autoscaling_group_name'),
+ ScheduledActionNames=[module.params.get('scheduled_action_name')]
+ )
+
+ try:
+ actions = client.describe_scheduled_actions(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ current_actions = actions.get("ScheduledUpdateGroupActions")
+
+ return current_actions
+
+
+def put_scheduled_update_group_action(current_actions):
+ changed = False
+ changes = dict()
+ params = format_request()
+
+ if not current_actions:
+ changed = True
+ else:
+ # To correctly detect changes convert the start_time & end_time to datetime object
+ if "StartTime" in params:
+ params["StartTime"] = timedate_parse(params["StartTime"])
+ if "EndTime" in params:
+ params["EndTime"] = timedate_parse(params["EndTime"])
+
+ for k, v in params.items():
+ if current_actions[0].get(k) != v:
+ changes[k] = v
+
+ if changes:
+ changed = True
+
+ if module.check_mode:
+ return changed
+
+ try:
+ client.put_scheduled_update_group_action(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ return changed
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ autoscaling_group_name=dict(required=True, type='str'),
+ scheduled_action_name=dict(required=True, type='str'),
+ start_time=dict(default=None, type='str'),
+ end_time=dict(default=None, type='str'),
+ time_zone=dict(default=None, type='str'),
+ recurrence=dict(type='str'),
+ min_size=dict(default=None, type='int'),
+ max_size=dict(default=None, type='int'),
+ desired_capacity=dict(default=None, type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['recurrence']]],
+ supports_check_mode=True
+ )
+
+ if not HAS_DATEUTIL:
+ module.fail_json(msg='dateutil is required for this module')
+
+ if not module.botocore_at_least("1.20.24"):
+ module.fail_json(msg='botocore version >= 1.20.24 is required for this module')
+
+ client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff())
+ current_actions = get_scheduled_actions()
+ state = module.params.get('state')
+ results = dict()
+
+ if state == 'present':
+ changed = put_scheduled_update_group_action(current_actions)
+ if not module.check_mode:
+ updated_action = get_scheduled_actions()[0]
+ results = dict(
+ scheduled_action_name=updated_action.get('ScheduledActionName'),
+ start_time=updated_action.get('StartTime'),
+ end_time=updated_action.get('EndTime'),
+ time_zone=updated_action.get('TimeZone'),
+ recurrence=updated_action.get('Recurrence'),
+ min_size=updated_action.get('MinSize'),
+ max_size=updated_action.get('MaxSize'),
+ desired_capacity=updated_action.get('DesiredCapacity')
+ )
+ else:
+ changed = delete_scheduled_action(current_actions)
+
+ results['changed'] = changed
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/aws_region_info.py b/ansible_collections/community/aws/plugins/modules/aws_region_info.py
new file mode 100644
index 000000000..126455a8c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/aws_region_info.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_region_info
+short_description: Gather information about AWS regions
+version_added: 1.0.0
+description:
+ - Gather information about AWS regions.
+author:
+ - 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ default: {}
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all regions
+- community.aws.aws_region_info:
+
+# Gather information about a single region
+- community.aws.aws_region_info:
+ filters:
+ region-name: eu-west-1
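+
+# Filter keys may also use underscores; this is equivalent to the task above
+# (illustrative):
+- community.aws.aws_region_info:
+    filters:
+      region_name: eu-west-1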
+'''
+
+RETURN = '''
+regions:
+ returned: on success
+ description: >
+ Regions that match the provided filters. Each element consists of a dict with all the information related
+ to that region.
+ type: list
+ sample: "[{
+ 'endpoint': 'ec2.us-west-1.amazonaws.com',
+ 'region_name': 'us-west-1'
+ }]"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
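+    # e.g. (illustrative) {'region_name': 'eu-west-1'} becomes {'region-name': 'eu-west-1'}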
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
+
+ try:
+ regions = connection.describe_regions(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe regions.")
+
+ module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py
new file mode 100644
index 000000000..555cfccbe
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py
@@ -0,0 +1,483 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: batch_compute_environment
+version_added: 1.0.0
+short_description: Manage AWS Batch Compute Environments
+description:
+ - This module allows the management of AWS Batch Compute Environments.
+ - It is idempotent and supports "Check" mode.
+ - Use module M(community.aws.batch_compute_environment) to manage the compute
+    environment, M(community.aws.batch_job_queue) to manage job queues, and M(community.aws.batch_job_definition) to manage job definitions.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_compute_environment).
+ The usage did not change.
+author:
+ - Jon Meran (@jonmer85)
+options:
+ compute_environment_name:
+ description:
+ - The name for your compute environment.
+ - Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed.
+ required: true
+ type: str
+ type:
+ description:
+ - The type of the compute environment.
+ required: true
+ choices: ["MANAGED", "UNMANAGED"]
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ compute_environment_state:
+ description:
+ - The state of the compute environment.
+ - If the state is C(ENABLED), then the compute environment accepts jobs
+ from a queue and can scale out automatically based on queues.
+ default: "ENABLED"
+ choices: ["ENABLED", "DISABLED"]
+ type: str
+ service_role:
+ description:
+ - The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
+ services on your behalf.
+ required: true
+ type: str
+ compute_resource_type:
+ description:
+ - The type of compute resource.
+ required: true
+ choices: ["EC2", "SPOT"]
+ type: str
+ minv_cpus:
+ description:
+ - The minimum number of EC2 vCPUs that an environment should maintain.
+ required: true
+ type: int
+ maxv_cpus:
+ description:
+ - The maximum number of EC2 vCPUs that an environment can reach.
+ required: true
+ type: int
+ desiredv_cpus:
+ description:
+ - The desired number of EC2 vCPUS in the compute environment.
+ type: int
+ instance_types:
+ description:
+ - The instance types that may be launched.
+ required: true
+ type: list
+ elements: str
+ image_id:
+ description:
+ - The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
+ type: str
+ subnets:
+ description:
+ - The VPC subnets into which the compute resources are launched.
+ required: true
+ type: list
+ elements: str
+ security_group_ids:
+ description:
+ - The EC2 security groups that are associated with instances launched in the compute environment.
+ required: true
+ type: list
+ elements: str
+ ec2_key_pair:
+ description:
+ - The EC2 key pair that is used for instances launched in the compute environment.
+ type: str
+ instance_role:
+ description:
+ - The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
+ required: true
+ type: str
+ tags:
+ description:
+ - Key-value pair tags to be applied to resources that are launched in the compute environment.
+ type: dict
+ bid_percentage:
+ description:
+ - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
+ instance type before instances are launched.
+ - For example, if your bid percentage is 20%, then the Spot price
+ must be below 20% of the current On-Demand price for that EC2 instance.
+ type: int
+ spot_iam_fleet_role:
+ description:
+ - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: My Batch Compute Environment
+ community.aws.batch_compute_environment:
+ compute_environment_name: computeEnvironmentName
+ state: present
+ region: us-east-1
+ compute_environment_state: ENABLED
+ type: MANAGED
+ compute_resource_type: EC2
+ minv_cpus: 0
+ maxv_cpus: 2
+ desiredv_cpus: 1
+ instance_types:
+ - optimal
+ subnets:
+ - my-subnet1
+ - my-subnet2
+ security_group_ids:
+ - my-sg1
+ - my-sg2
+ instance_role: arn:aws:iam::<account>:instance-profile/<role>
+ tags:
+ tag1: value1
+ tag2: value2
+ service_role: arn:aws:iam::<account>:role/service-role/<role>
+ register: aws_batch_compute_environment_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: aws_batch_compute_environment_action
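+
+# A SPOT compute environment additionally takes (illustrative values):
+#
+#    compute_resource_type: SPOT
+#    bid_percentage: 20
+#    spot_iam_fleet_role: arn:aws:iam::<account>:role/<spot-fleet-role>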
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ batch_compute_environment_action: none
+ changed: false
+ invocation:
+ module_args:
+ aws_access_key: ~
+ aws_secret_key: ~
+ bid_percentage: ~
+ compute_environment_name: <name>
+ compute_environment_state: ENABLED
+ compute_resource_type: EC2
+ desiredv_cpus: 0
+ ec2_key_pair: ~
+ ec2_url: ~
+ image_id: ~
+ instance_role: "arn:aws:iam::..."
+ instance_types:
+ - optimal
+ maxv_cpus: 8
+ minv_cpus: 0
+ profile: ~
+ region: us-east-1
+ security_group_ids:
+ - "*******"
+ security_token: ~
+ service_role: "arn:aws:iam::...."
+ spot_iam_fleet_role: ~
+ state: present
+ subnets:
+ - "******"
+ tags:
+ Environment: <name>
+ Name: <name>
+ type: MANAGED
+ validate_certs: true
+ response:
+ computeEnvironmentArn: "arn:aws:batch:...."
+ computeEnvironmentName: <name>
+ computeResources:
+ desiredvCpus: 0
+ instanceRole: "arn:aws:iam::..."
+ instanceTypes:
+ - optimal
+ maxvCpus: 8
+ minvCpus: 0
+ securityGroupIds:
+ - "******"
+ subnets:
+ - "*******"
+ tags:
+ Environment: <name>
+ Name: <name>
+ type: EC2
+ ecsClusterArn: "arn:aws:ecs:....."
+ serviceRole: "arn:aws:iam::..."
+ state: ENABLED
+ status: VALID
+ statusReason: "ComputeEnvironment Healthy"
+ type: MANAGED
+ type: dict
+'''
+
+import re
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
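+    # e.g. (illustrative): with module.params {'compute_environment_name': 'my-env',
+    # 'type': 'MANAGED', 'image_id': None}, set_api_params(module,
+    # ('compute_environment_name', 'type', 'image_id')) returns
+    # {'computeEnvironmentName': 'my-env', 'type': 'MANAGED'}; None values are
+    # dropped and snake_case keys become camelCase.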
+ api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+ return snake_dict_to_camel_dict(api_params)
+
+
+def validate_params(module):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :return:
+ """
+
+ compute_environment_name = module.params['compute_environment_name']
+
+ # validate compute environment name
+    if not re.search(r'^[\w:]+$', compute_environment_name):
+        module.fail_json(
+            msg="compute_environment_name {0} is invalid. Names must contain only alphanumeric characters, "
+                "underscores and colons.".format(compute_environment_name)
+        )
+ if not compute_environment_name.startswith('arn:aws:batch:'):
+ if len(compute_environment_name) > 128:
+ module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
+ .format(compute_environment_name))
+
+ return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Compute Environment functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_compute_environment(module, client):
+ try:
+ environments = client.describe_compute_environments(
+ computeEnvironments=[module.params['compute_environment_name']]
+ )
+ if len(environments['computeEnvironments']) > 0:
+ return environments['computeEnvironments'][0]
+ else:
+ return None
+ except ClientError:
+ return None
+
+
+def create_compute_environment(module, client):
+ """
+ Adds a Batch compute environment
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = (
+ 'compute_environment_name', 'type', 'service_role')
+ api_params = set_api_params(module, params)
+
+ if module.params['compute_environment_state'] is not None:
+ api_params['state'] = module.params['compute_environment_state']
+
+ compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
+ 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
+ 'spot_iam_fleet_role')
+ compute_resources_params = set_api_params(module, compute_resources_param_list)
+
+ if module.params['compute_resource_type'] is not None:
+ compute_resources_params['type'] = module.params['compute_resource_type']
+
+ api_params['computeResources'] = compute_resources_params
+
+ try:
+ if not module.check_mode:
+ client.create_compute_environment(**api_params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error creating compute environment')
+
+ return changed
+
+
+def remove_compute_environment(module, client):
+ """
+ Remove a Batch compute environment
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = {'computeEnvironment': module.params['compute_environment_name']}
+
+ try:
+ if not module.check_mode:
+ client.delete_compute_environment(**api_params)
+ changed = True
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error removing compute environment')
+ return changed
+
+
+def manage_state(module, client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ compute_environment_state = module.params['compute_environment_state']
+ compute_environment_name = module.params['compute_environment_name']
+ service_role = module.params['service_role']
+ minv_cpus = module.params['minv_cpus']
+ maxv_cpus = module.params['maxv_cpus']
+ desiredv_cpus = module.params['desiredv_cpus']
+ action_taken = 'none'
+ update_env_response = ''
+
+ check_mode = module.check_mode
+
+ # check if the compute environment exists
+ current_compute_environment = get_current_compute_environment(module, client)
+ response = current_compute_environment
+ if current_compute_environment:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ updates = False
+ # Update Batch Compute Environment configuration
+ compute_kwargs = {'computeEnvironment': compute_environment_name}
+
+ # Update configuration if needed
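+            # Only the environment state, the service role and the min/max/desired
+            # vCPU counts are compared and updated in place below; other compute
+            # resource settings of an existing environment are left unchanged.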
+ compute_resources = {}
+ if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
+ compute_kwargs.update({'state': compute_environment_state})
+ updates = True
+ if service_role and current_compute_environment['serviceRole'] != service_role:
+ compute_kwargs.update({'serviceRole': service_role})
+ updates = True
+ if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
+ compute_resources['minvCpus'] = minv_cpus
+ if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
+ compute_resources['maxvCpus'] = maxv_cpus
+ if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
+ compute_resources['desiredvCpus'] = desiredv_cpus
+ if len(compute_resources) > 0:
+ compute_kwargs['computeResources'] = compute_resources
+ updates = True
+            if updates:
+                try:
+                    if not check_mode:
+                        update_env_response = client.update_compute_environment(**compute_kwargs)
+                        if not update_env_response:
+                            module.fail_json(msg='Unable to get compute environment information after updating')
+                    changed = True
+                    action_taken = "updated"
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Unable to update environment.")
+
+ else:
+ # Create Batch Compute Environment
+ changed = create_compute_environment(module, client)
+ # Describe compute environment
+ action_taken = 'added'
+ response = get_current_compute_environment(module, client)
+ if not response:
+ module.fail_json(msg='Unable to get compute environment information after creating')
+ else:
+ if current_state == 'present':
+ # remove the compute environment
+ changed = remove_compute_environment(module, client)
+ action_taken = 'deleted'
+ return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: changed, batch_compute_environment_action, response
+ """
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ compute_environment_name=dict(required=True),
+ type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
+ compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+ service_role=dict(required=True),
+ compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
+ minv_cpus=dict(type='int', required=True),
+ maxv_cpus=dict(type='int', required=True),
+ desiredv_cpus=dict(type='int'),
+ instance_types=dict(type='list', required=True, elements='str'),
+ image_id=dict(),
+ subnets=dict(type='list', required=True, elements='str'),
+ security_group_ids=dict(type='list', required=True, elements='str'),
+ ec2_key_pair=dict(no_log=False),
+ instance_role=dict(required=True),
+ tags=dict(type='dict'),
+ bid_percentage=dict(type='int'),
+ spot_iam_fleet_role=dict(),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('batch')
+
+ validate_params(module)
+
+ results = manage_state(module, client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_definition.py b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py
new file mode 100644
index 000000000..79ace0534
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: batch_job_definition
+version_added: 1.0.0
+short_description: Manage AWS Batch Job Definitions
+description:
+ - This module allows the management of AWS Batch Job Definitions.
+ - It is idempotent and supports "Check" mode.
+ - Use module M(community.aws.batch_compute_environment) to manage the compute
+    environment, M(community.aws.batch_job_queue) to manage job queues, and M(community.aws.batch_job_definition) to manage job definitions.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_definition).
+ The usage did not change.
+author:
+ - Jon Meran (@jonmer85)
+options:
+ job_definition_arn:
+ description:
+ - The ARN for the job definition.
+ type: str
+ job_definition_name:
+ description:
+ - The name for the job definition.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ type:
+ description:
+ - The type of job definition.
+ required: true
+ type: str
+ parameters:
+ description:
+ - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
+ key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
+ the job definition.
+ type: dict
+ image:
+ description:
+ - >
+ The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
+ Hub registry are available by default. Other repositories are specified with C(repository-url/image-name:tag).
+ Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes,
+ and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker
+ Remote API and the IMAGE parameter of docker run.
+ required: true
+ type: str
+ vcpus:
+ description:
+ - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container
+ section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to
+ 1,024 CPU shares.
+ required: true
+ type: int
+ memory:
+ description:
+ - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
+ specified here, the container is killed. This parameter maps to Memory in the Create a container section of the
+ Docker Remote API and the --memory option to docker run.
+ required: true
+ type: int
+ command:
+ description:
+ - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of
+ the Docker Remote API and the COMMAND parameter to docker run. For more information,
+ see U(https://docs.docker.com/engine/reference/builder/#cmd).
+ type: list
+ elements: str
+ default: []
+ job_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
+ type: str
+ volumes:
+ description:
+ - A list of data volumes used in a job.
+ suboptions:
+ host:
+ description:
+ - The contents of the host parameter determine whether your data volume persists on the host container
+ instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
+ path for your data volume, but the data is not guaranteed to persist after the containers associated with
+ it stop running.
+ This is a dictionary with one property, sourcePath - The path on the host container
+            instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned
+ a host path for you. If the host parameter contains a sourcePath file location, then the data volume
+ persists at the specified location on the host container instance until you delete it manually. If the
+ sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the
+ location does exist, the contents of the source path folder are exported.
+ name:
+ description:
+ - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
+ allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
+ type: list
+ elements: dict
+ default: []
+ environment:
+ description:
+ - The environment variables to pass to a container. This parameter maps to Env in the Create a container section
+ of the Docker Remote API and the --env option to docker run.
+ suboptions:
+ name:
+ description:
+ - The name of the key value pair. For environment variables, this is the name of the environment variable.
+ value:
+ description:
+ - The value of the key value pair. For environment variables, this is the value of the environment variable.
+ type: list
+ elements: dict
+ default: []
+ mount_points:
+ description:
+ - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container
+ section of the Docker Remote API and the --volume option to docker run.
+ suboptions:
+ containerPath:
+ description:
+ - The path on the container at which to mount the host volume.
+ readOnly:
+ description:
+          - If this value is true, the container has read-only access to the volume; otherwise, the container can write
+ to the volume. The default value is C(false).
+ sourceVolume:
+ description:
+ - The name of the volume to mount.
+ type: list
+ elements: dict
+ default: []
+ readonly_root_filesystem:
+ description:
+ - When this parameter is true, the container is given read-only access to its root file system. This parameter
+ maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option
+ to docker run.
+ type: str
+ privileged:
+ description:
+ - When this parameter is true, the container is given elevated privileges on the host container instance
+ (similar to the root user). This parameter maps to Privileged in the Create a container section of the
+ Docker Remote API and the --privileged option to docker run.
+ type: str
+ ulimits:
+ description:
+ - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section
+ of the Docker Remote API and the --ulimit option to docker run.
+ suboptions:
+ hardLimit:
+ description:
+ - The hard limit for the ulimit type.
+ name:
+ description:
+ - The type of the ulimit.
+ softLimit:
+ description:
+ - The soft limit for the ulimit type.
+ type: list
+ elements: dict
+ default: []
+ user:
+ description:
+ - The user name to use inside the container. This parameter maps to User in the Create a container section of
+ the Docker Remote API and the --user option to docker run.
+ type: str
+ attempts:
+ description:
+ - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10
+ attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that
+ many times.
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+---
+- name: My Batch Job Definition
+ community.aws.batch_job_definition:
+ job_definition_name: My Batch Job Definition
+ state: present
+ type: container
+ parameters:
+ Param1: Val1
+ Param2: Val2
+ image: <Docker Image URL>
+ vcpus: 1
+ memory: 512
+ command:
+ - python
+ - run_my_script.py
+ - arg1
+ job_role_arn: <Job Role ARN>
+ attempts: 3
+ register: job_definition_create_result
+
+- name: show results
+  ansible.builtin.debug:
+    var: job_definition_create_result
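+
+# volumes and mount_points are lists of dicts that follow the Batch API shape,
+# for example (illustrative):
+#
+#    volumes:
+#      - name: scratch
+#        host:
+#          sourcePath: /data/scratch
+#    mount_points:
+#      - sourceVolume: scratch
+#        containerPath: /scratch
+#        readOnly: false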
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ aws_batch_job_definition_action: none
+ changed: false
+ response:
+ job_definition_arn: "arn:aws:batch:...."
+ job_definition_name: <name>
+ status: INACTIVE
+ type: container
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+def validate_params(module, batch_client):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+ return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Job Definition functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_job_definition(module, batch_client):
+ try:
+ environments = batch_client.describe_job_definitions(
+ jobDefinitionName=module.params['job_definition_name']
+ )
+ if len(environments['jobDefinitions']) > 0:
+ latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions']))
+ latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision),
+ None)
+ return latest_definition
+ return None
+ except ClientError:
+ return None
+
+
+def create_job_definition(module, batch_client):
+ """
+ Adds a Batch job definition
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = set_api_params(module, get_base_params())
+ container_properties_params = set_api_params(module, get_container_property_params())
+ retry_strategy_params = set_api_params(module, get_retry_strategy_params())
+
+ api_params['retryStrategy'] = retry_strategy_params
+ api_params['containerProperties'] = container_properties_params
+
+ try:
+ if not module.check_mode:
+ batch_client.register_job_definition(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error registering job definition')
+
+ return changed
+
+
+def get_retry_strategy_params():
+ return ('attempts',)
+
+
+def get_container_property_params():
+ return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points',
+ 'readonly_root_filesystem', 'privileged', 'ulimits', 'user')
+
+
+def get_base_params():
+ return 'job_definition_name', 'type', 'parameters'
+
+
+def remove_job_definition(module, batch_client):
+ """
+ Remove a Batch job definition
+
+ :param module:
+ :param batch_client:
+ :return:
+ """
+
+ changed = False
+
+ try:
+ if not module.check_mode:
+ batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn'])
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error removing job definition')
+ return changed
+
+
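+# Compare the module's snake_case parameters with the camelCase fields of the
+# current job definition; cc() converts the names, e.g. (illustrative)
+# 'job_role_arn' -> 'jobRoleArn'.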
+def job_definition_equal(module, current_definition):
+ equal = True
+
+ for param in get_base_params():
+ if module.params.get(param) != current_definition.get(cc(param)):
+ equal = False
+ break
+
+    for param in get_container_property_params():
+        if module.params.get(param) != current_definition.get('containerProperties', {}).get(cc(param)):
+            equal = False
+            break
+
+    for param in get_retry_strategy_params():
+        if module.params.get(param) != current_definition.get('retryStrategy', {}).get(cc(param)):
+            equal = False
+            break
+
+ return equal
+
+
+def manage_state(module, batch_client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ job_definition_name = module.params['job_definition_name']
+ action_taken = 'none'
+ response = None
+
+ check_mode = module.check_mode
+
+ # check if the job definition exists
+ current_job_definition = get_current_job_definition(module, batch_client)
+ if current_job_definition:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ # check if definition has changed and register a new version if necessary
+ if not job_definition_equal(module, current_job_definition):
+ create_job_definition(module, batch_client)
+ action_taken = 'updated with new version'
+ changed = True
+ else:
+ # Create Job definition
+ changed = create_job_definition(module, batch_client)
+ action_taken = 'added'
+
+ response = get_current_job_definition(module, batch_client)
+ if not response:
+ module.fail_json(msg='Unable to get job definition information after creating/updating')
+ else:
+ if current_state == 'present':
+ # remove the Job definition
+ changed = remove_job_definition(module, batch_client)
+ action_taken = 'deregistered'
+ return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ job_definition_name=dict(required=True),
+ job_definition_arn=dict(),
+ type=dict(required=True),
+ parameters=dict(type='dict'),
+ image=dict(required=True),
+ vcpus=dict(type='int', required=True),
+ memory=dict(type='int', required=True),
+ command=dict(type='list', default=[], elements='str'),
+ job_role_arn=dict(),
+ volumes=dict(type='list', default=[], elements='dict'),
+ environment=dict(type='list', default=[], elements='dict'),
+ mount_points=dict(type='list', default=[], elements='dict'),
+ readonly_root_filesystem=dict(),
+ privileged=dict(),
+ ulimits=dict(type='list', default=[], elements='dict'),
+ user=dict(),
+ attempts=dict(type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ batch_client = module.client('batch')
+
+ validate_params(module, batch_client)
+
+ results = manage_state(module, batch_client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_queue.py b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py
new file mode 100644
index 000000000..ef48896a4
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: batch_job_queue
+version_added: 1.0.0
+short_description: Manage AWS Batch Job Queues
+description:
+ - This module allows the management of AWS Batch Job Queues.
+ - It is idempotent and supports "Check" mode.
+ - Use module M(community.aws.batch_compute_environment) to manage the compute
+    environment, M(community.aws.batch_job_queue) to manage job queues, and M(community.aws.batch_job_definition) to manage job definitions.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_queue).
+ The usage did not change.
+author:
+ - Jon Meran (@jonmer85)
+options:
+ job_queue_name:
+ description:
+ - The name for the job queue.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ job_queue_state:
+ description:
+ - The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs.
+ default: "ENABLED"
+ choices: ["ENABLED", "DISABLED"]
+ type: str
+ priority:
+ description:
+      - The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
+        parameter) are evaluated first when associated with the same compute environment. Priority is determined in
+        descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a
+        job queue with a priority value of 1.
+ required: true
+ type: int
+ compute_environment_order:
+ description:
+ - The set of compute environments mapped to a job queue and their order relative to each other. The job
+ scheduler uses this parameter to determine which compute environment should execute a given job. Compute
+ environments must be in the VALID state before you can associate them with a job queue. You can associate up to
+ 3 compute environments with a job queue.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ order:
+ type: int
+ description: The relative priority of the environment.
+ compute_environment:
+ type: str
+ description: The name of the compute environment.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: My Batch Job Queue
+ community.aws.batch_job_queue:
+ job_queue_name: jobQueueName
+ state: present
+ region: us-east-1
+ job_queue_state: ENABLED
+ priority: 1
+ compute_environment_order:
+ - order: 1
+ compute_environment: my_compute_env1
+ - order: 2
+ compute_environment: my_compute_env2
+ register: batch_job_queue_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: batch_job_queue_action
+'''
+
+RETURN = r'''
+---
+output:
+ description: "returns what action was taken, whether something was changed, invocation and response"
+ returned: always
+ sample:
+ batch_job_queue_action: updated
+ changed: false
+ response:
+ job_queue_arn: "arn:aws:batch:...."
+ job_queue_name: <name>
+ priority: 1
+ state: DISABLED
+ status: UPDATING
+ status_reason: "JobQueue Healthy"
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+def validate_params(module):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ """
+ return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Batch Job Queue functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_job_queue(module, client):
+ try:
+ environments = client.describe_job_queues(
+ jobQueues=[module.params['job_queue_name']]
+ )
+ return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
+ except ClientError:
+ return None
+
+
+def create_job_queue(module, client):
+ """
+ Adds a Batch job queue
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = ('job_queue_name', 'priority')
+ api_params = set_api_params(module, params)
+
+ if module.params['job_queue_state'] is not None:
+ api_params['state'] = module.params['job_queue_state']
+
+ api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
+
+ try:
+ if not module.check_mode:
+ client.create_job_queue(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Error creating job queue')
+
+ return changed
+
+
+def get_compute_environment_order_list(module):
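+    # Map each entry's snake_case keys to the camelCase keys expected by the
+    # Batch API, e.g. (illustrative) {'order': 1, 'compute_environment': 'env-a'}
+    # becomes {'order': 1, 'computeEnvironment': 'env-a'}.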
+ compute_environment_order_list = []
+ for ceo in module.params['compute_environment_order']:
+ compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
+ return compute_environment_order_list
+
+
+def remove_job_queue(module, client):
+ """
+ Remove a Batch job queue
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = {'jobQueue': module.params['job_queue_name']}
+
+ try:
+ if not module.check_mode:
+ client.delete_job_queue(**api_params)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Error removing job queue')
+ return changed
+
+
+def manage_state(module, client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ job_queue_state = module.params['job_queue_state']
+ job_queue_name = module.params['job_queue_name']
+ priority = module.params['priority']
+ action_taken = 'none'
+ response = None
+
+ check_mode = module.check_mode
+
+ # check if the job queue exists
+ current_job_queue = get_current_job_queue(module, client)
+ if current_job_queue:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ updates = False
+ # Update Batch Job Queue configuration
+ job_kwargs = {'jobQueue': job_queue_name}
+
+ # Update configuration if needed
+ if job_queue_state and current_job_queue['state'] != job_queue_state:
+ job_kwargs.update({'state': job_queue_state})
+ updates = True
+ if priority is not None and current_job_queue['priority'] != priority:
+ job_kwargs.update({'priority': priority})
+ updates = True
+
+ new_compute_environment_order_list = get_compute_environment_order_list(module)
+ if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
+ job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
+ updates = True
+
+ if updates:
+ try:
+ if not check_mode:
+ client.update_job_queue(**job_kwargs)
+ changed = True
+ action_taken = "updated"
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update job queue")
+
+ else:
+ # Create Job Queue
+ changed = create_job_queue(module, client)
+ action_taken = 'added'
+
+ # Describe job queue
+ response = get_current_job_queue(module, client)
+ if not response:
+ module.fail_json(msg='Unable to get job queue information after creating/updating')
+ else:
+ if current_state == 'present':
+ # remove the Job Queue
+ changed = remove_job_queue(module, client)
+ action_taken = 'deleted'
+ return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: changed, batch_job_queue_action, response
+ """
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ job_queue_name=dict(required=True),
+ job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+ priority=dict(type='int', required=True),
+ compute_environment_order=dict(type='list', required=True, elements='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('batch')
+
+ validate_params(module)
+
+ results = manage_state(module, client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
new file mode 100644
index 000000000..f7e71e2f8
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: cloudformation_exports_info
+short_description: Read a value from CloudFormation Exports
+version_added: 1.0.0
+description:
+  - This module retrieves the names and values of all CloudFormation Exports.
+author:
+ - "Michael Moyle (@mmoyle)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Get Exports
+ community.aws.cloudformation_exports_info:
+ profile: 'my_aws_profile'
+ region: 'my_region'
+ register: cf_exports
+- ansible.builtin.debug:
+ msg: "{{ cf_exports }}"
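+
+- name: Use a single export value (illustrative export name)
+  ansible.builtin.debug:
+    msg: "{{ cf_exports.export_items['MyExportName'] }}"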
+'''
+
+RETURN = '''
+export_items:
+  description: A dictionary of export names and values.
+  returned: always
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+try:
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import BotoCoreError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def list_exports(cloudformation_client):
+    '''Return all CloudFormation export names and values as a dictionary.'''
+ list_exports_paginator = cloudformation_client.get_paginator('list_exports')
+ exports = list_exports_paginator.paginate().build_full_result()['Exports']
+ export_items = dict()
+
+ for item in exports:
+ export_items[item['Name']] = item['Value']
+
+ return export_items
+
+
+def main():
+ argument_spec = dict()
+    result = dict(
+        changed=False
+    )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ cloudformation_client = module.client('cloudformation')
+
+ try:
+ result['export_items'] = list_exports(cloudformation_client)
+
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
new file mode 100644
index 000000000..c6771db5e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cloudformation_stack_set
+version_added: 1.0.0
+short_description: Manage groups of CloudFormation stacks
+description:
+ - Launches/updates/deletes AWS CloudFormation Stack Sets.
+notes:
+ - To make an individual stack, you want the M(amazon.aws.cloudformation) module.
+options:
+ name:
+ description:
+ - Name of the CloudFormation stack set.
+ required: true
+ type: str
+ description:
+ description:
+ - A description of what this stack set creates.
+ type: str
+ parameters:
+ description:
+      - A dict of all the template variables for the stack. The value of each variable can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+      - If I(state=present), the stack will be created. If I(state=present) and the stack exists and the template has changed, it will be updated.
+        If I(state=absent), the stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: path
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ template_url:
+ description:
+ - Location of file containing the template body.
+ - The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
+ as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ purge_stacks:
+ description:
+ - Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
+ - By default, instances will be deleted. To keep stacks when stack set is deleted set I(purge_stacks=false).
+ type: bool
+ default: true
+ wait:
+ description:
+ - Whether or not to wait for stack operation to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
+ - If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for stacks to complete create/update/delete operations.
+ default: 900
+ type: int
+ capabilities:
+ description:
+ - Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
+ - Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
+ - >
+ The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
+ AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
+ type: list
+ elements: str
+ choices:
+ - 'CAPABILITY_IAM'
+ - 'CAPABILITY_NAMED_IAM'
+ regions:
+ description:
+ - A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
+ specifies the region for stack instances.
+ - At least one region must be specified to create a stack set. On updates, if fewer regions are specified only the specified regions will
+ have their stack instances updated.
+ type: list
+ elements: str
+ accounts:
+ description:
+      - A list of AWS accounts in which to create instances of CloudFormation stacks.
+      - At least one account must be specified to create a stack set. On updates, if fewer accounts are specified only the specified accounts will
+        have their stack instances updated.
+ type: list
+ elements: str
+ administration_role_arn:
+ description:
+ - ARN of the administration role, meaning the role that CloudFormation Stack Sets use to assume the roles in your child accounts.
+ - This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
+ account number of the current IAM role/user/STS credentials.
+ aliases:
+ - admin_role_arn
+ - admin_role
+ - administration_role
+ type: str
+ execution_role_name:
+ description:
+      - Name of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
+      - This MUST NOT be an ARN, and the role must exist in each child account specified.
+      - The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole).
+ aliases:
+ - exec_role_name
+ - exec_role
+ - execution_role
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ type: dict
+ failure_tolerance:
+ description:
+ - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
+ type: dict
+ default: {}
+ suboptions:
+ fail_count:
+ description:
+ - The number of accounts, per region, for which this operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ type: int
+ fail_percentage:
+ type: int
+ description:
+ - The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ parallel_percentage:
+ type: int
+ description:
+ - The maximum percentage of accounts in which to perform this operation at one time.
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual percentage may be lower.
+ parallel_count:
+ type: int
+ description:
+ - The maximum number of accounts in which to perform this operation at one time.
+ - I(parallel_count) may be at most one more than the I(fail_count).
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual count may be lower.
+
+author: "Ryan Scott Brown (@ryansb)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create a stack set with instances in two accounts
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ description: Test stack in two accounts
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ accounts:
+ - 123456789012
+ - 234567890123
+ regions:
+ - us-east-1
+
+- name: on subsequent calls, templates are optional but parameters and tags can be altered
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ state: present
+ parameters:
+ InstanceName: my_stacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts:
+ - 123456789012
+ - 234567890123
+ regions:
+ - us-east-1
+
+- name: The same type of update, but wait for the update to complete in all stacks
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ state: present
+ wait: true
+ parameters:
+ InstanceName: my_restacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts:
+ - 123456789012
+ - 234567890123
+ regions:
+ - us-east-1
+
+- name: Register new accounts (create new stack instances) with an existing stack set.
+ community.aws.cloudformation_stack_set:
+ name: my-stack
+ state: present
+ wait: true
+ parameters:
+ InstanceName: my_restacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts:
+ - 123456789012
+ - 234567890123
+ - 345678901234
+ regions:
+ - us-east-1
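+
+- name: Delete the stack set but keep the deployed stacks (illustrative)
+  community.aws.cloudformation_stack_set:
+    name: my-stack
+    state: absent
+    purge_stacks: false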
+'''
+
+RETURN = r'''
+operations_log:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample:
+ - action: CREATE
+ creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
+ end_timestamp: '2018-06-18T17:41:24.560000+00:00'
+ operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
+ status: FAILED
+ stack_instances:
+ - account: '1234567890'
+ region: us-east-1
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
+
+operations:
+ description: All operations initiated by this run of the cloudformation_stack_set module
+ returned: always
+ type: list
+ sample:
+ - action: CREATE
+ administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
+ creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
+ end_timestamp: '2018-06-18T17:41:24.560000+00:00'
+ execution_role_name: AWSCloudFormationStackSetExecutionRole
+ operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
+ operation_preferences:
+ region_order:
+ - us-east-1
+ - us-east-2
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: FAILED
+stack_instances:
+ description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID.
+ returned: state == present
+ type: list
+ sample:
+ - account: '1234567890'
+ region: us-east-1
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: >
+ Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
+ - account: '1234567890'
+ region: us-east-2
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ status: OUTDATED
+ status_reason: Cancelled since failure tolerance has exceeded
+stack_set:
+ type: dict
+ description: Facts about the currently deployed stack set, its parameters, and its tags
+ returned: state == present
+ sample:
+ administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
+ capabilities: []
+ description: test stack PRIME
+ execution_role_name: AWSCloudFormationStackSetExecutionRole
+ parameters: []
+    stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
+ stack_set_name: TestStackPrime
+ status: ACTIVE
+ tags:
+ Some: Thing
+ an: other
+ template_body: |
+ AWSTemplateFormatVersion: "2010-09-09"
+ Parameters: {}
+ Resources:
+ Bukkit:
+ Type: "AWS::S3::Bucket"
+ Properties: {}
+ other:
+ Type: "AWS::SNS::Topic"
+ Properties: {}
+
+''' # NOQA
+
+import datetime
+import itertools
+import time
+import uuid
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def create_stack_set(module, stack_params, cfn):
+ try:
+ cfn.create_stack_set(aws_retry=True, **stack_params)
+ return await_stack_set_exists(cfn, stack_params['StackSetName'])
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName')))
+
+
+def update_stack_set(module, stack_params, cfn):
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack_set(**stack_params)
+ except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
+ except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
+ "the `accounts` and `regions` parameters.")
+ except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ err, msg="Another operation is already in progress on this stack set - please try again later. When making "
+ "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.")
+ except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="Could not update stack set.")
+ if module.params.get('wait'):
+ await_stack_set_operation(
+ module, cfn, operation_id=stack_params['OperationId'],
+ stack_set_name=stack_params['StackSetName'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+
+ return True
+
+
+def compare_stack_instances(cfn, stack_set_name, accounts, regions):
+ instance_list = cfn.list_stack_instances(
+ aws_retry=True,
+ StackSetName=stack_set_name,
+ )['Summaries']
+ desired_stack_instances = set(itertools.product(accounts, regions))
+ existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list)
+ # new stacks, existing stacks, unspecified stacks
+ return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances)
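+
+
+ # A minimal illustration of the set arithmetic above (the account IDs and
+ # regions here are hypothetical, not taken from the module):
+ #   desired  = {('111111111111', 'us-east-1'), ('222222222222', 'us-east-1')}
+ #   existing = {('111111111111', 'us-east-1')}
+ #   desired - existing  -> {('222222222222', 'us-east-1')}  (instances to create)
+ #   existing - desired  -> set()                            (instances not requested)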
+
+
+@AWSRetry.jittered_backoff(retries=3, delay=4)
+def stack_set_facts(cfn, stack_set_name):
+ try:
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet']
+ ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
+ return ss
+ except cfn.exceptions.from_code('StackSetNotFound'):
+ # Return None if the stack doesn't exist
+ return
+
+
+def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
+ wait_start = datetime.datetime.now()
+ operation = None
+ for i in range(max_wait // 15):  # poll every 15 seconds, so e.g. wait_timeout=900 allows up to 60 polls
+ try:
+ operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
+ if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
+ # Stack set has completed operation
+ break
+ except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ pass
+ except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except
+ pass
+ time.sleep(15)
+
+ if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
+ await_stack_instance_completion(
+ module, cfn,
+ stack_set_name=stack_set_name,
+ # subtract however long we waited already
+ max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
+ )
+ elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
+ # The operation finished unsuccessfully; there is nothing further to wait on.
+ pass
+ else:
+ module.warn(
+ "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
+ operation_id, stack_set_name, max_wait
+ )
+ )
+
+
+def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
+ to_await = None
+ for i in range(max_wait // 15):  # poll every 15 seconds until the instances settle or time runs out
+ try:
+ stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
+ to_await = [inst for inst in stack_instances['Summaries']
+ if inst['Status'] != 'CURRENT']
+ if not to_await:
+ return stack_instances['Summaries']
+ except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ # this means the deletion beat us, or the stack set is not yet propagated
+ pass
+ time.sleep(15)
+
+ module.warn(
+ "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
+ stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait
+ )
+ )
+
+
+def await_stack_set_exists(cfn, stack_set_name):
+ # AWSRetry will retry on `StackSetNotFound` errors for us
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet']
+ ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
+ return camel_dict_to_snake_dict(ss, ignore_list=('Tags',))
+
+
+def describe_stack_tree(module, stack_set_name, operation_ids=None):
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
+ cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
+ result = dict()
+ result['stack_set'] = camel_dict_to_snake_dict(
+ cfn.describe_stack_set(
+ StackSetName=stack_set_name,
+ aws_retry=True,
+ )['StackSet']
+ )
+ result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
+ result['operations_log'] = sorted(
+ camel_dict_to_snake_dict(
+ cfn.list_stack_set_operations(
+ StackSetName=stack_set_name,
+ aws_retry=True,
+ )
+ )['summaries'],
+ key=lambda x: x['creation_timestamp']
+ )
+ result['stack_instances'] = sorted(
+ [
+ camel_dict_to_snake_dict(i) for i in
+ cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
+ ],
+ key=lambda i: i['region'] + i['account']
+ )
+
+ if operation_ids:
+ result['operations'] = []
+ for op_id in operation_ids:
+ try:
+ result['operations'].append(camel_dict_to_snake_dict(
+ cfn.describe_stack_set_operation(
+ StackSetName=stack_set_name,
+ OperationId=op_id,
+ )['StackSetOperation']
+ ))
+ except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except
+ pass
+ return result
+
+
+def get_operation_preferences(module):
+ params = dict()
+ if module.params.get('regions'):
+ params['RegionOrder'] = list(module.params['regions'])
+ for param, api_name in {
+ 'fail_count': 'FailureToleranceCount',
+ 'fail_percentage': 'FailureTolerancePercentage',
+ 'parallel_percentage': 'MaxConcurrentPercentage',
+ 'parallel_count': 'MaxConcurrentCount',
+ }.items():
+ # Check explicitly against None so that a legitimate value of 0
+ # (e.g. fail_count: 0) is still passed through to the API.
+ if module.params.get('failure_tolerance', {}).get(param) is not None:
+ params[api_name] = module.params.get('failure_tolerance', {}).get(param)
+ return params
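+
+
+ # For example, a task that sets regions [us-east-1, us-east-2] and
+ # failure_tolerance: {fail_count: 1, parallel_percentage: 20} (hypothetical
+ # values) would yield:
+ #   {'RegionOrder': ['us-east-1', 'us-east-2'],
+ #    'FailureToleranceCount': 1,
+ #    'MaxConcurrentPercentage': 20}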
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=900),
+ state=dict(default='present', choices=['present', 'absent']),
+ purge_stacks=dict(type='bool', default=True),
+ parameters=dict(type='dict', default={}),
+ template=dict(type='path'),
+ template_url=dict(),
+ template_body=dict(),
+ capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
+ regions=dict(type='list', elements='str'),
+ accounts=dict(type='list', elements='str'),
+ failure_tolerance=dict(
+ type='dict',
+ default={},
+ options=dict(
+ fail_count=dict(type='int'),
+ fail_percentage=dict(type='int'),
+ parallel_percentage=dict(type='int'),
+ parallel_count=dict(type='int'),
+ ),
+ mutually_exclusive=[
+ ['fail_count', 'fail_percentage'],
+ ['parallel_count', 'parallel_percentage'],
+ ],
+ ),
+ administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
+ execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
+ tags=dict(type='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body']],
+ supports_check_mode=True
+ )
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
+ cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
+ existing_stack_set = stack_set_facts(cfn, module.params['name'])
+
+ operation_uuid = to_native(uuid.uuid4())
+ operation_ids = []
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {}
+ state = module.params['state']
+ if state == 'present' and not module.params['accounts']:
+ module.fail_json(
+ msg="Can't create a stack set without choosing at least one account. "
+ "To get the ID of the current account, use the aws_caller_info module."
+ )
+
+ # Guard against a TypeError when state=absent and no accounts were supplied.
+ if module.params['accounts']:
+ module.params['accounts'] = [to_native(a) for a in module.params['accounts']]
+
+ stack_params['StackSetName'] = module.params['name']
+ if module.params.get('description'):
+ stack_params['Description'] = module.params['description']
+
+ if module.params.get('capabilities'):
+ stack_params['Capabilities'] = module.params['capabilities']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as tpl:
+ stack_params['TemplateBody'] = tpl.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+ else:
+ # no template is provided, but if the stack set exists already, we can use the existing one.
+ if existing_stack_set:
+ stack_params['UsePreviousTemplate'] = True
+ else:
+ module.fail_json(
+ msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
+ "`template_body`, or `template_url`".format(module.params['name'])
+ )
+
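+ # The loop below accepts both the simple and the dict form of `parameters`
+ # (values here are hypothetical). For example:
+ #   parameters: {InstanceName: my_instance}
+ #     -> {'ParameterKey': 'InstanceName', 'ParameterValue': 'my_instance'}
+ #   parameters: {KeepMe: {use_previous_value: true}}
+ #     -> {'ParameterKey': 'KeepMe', 'UsePreviousValue': True}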
+ stack_params['Parameters'] = []
+ for k, v in module.params['parameters'].items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = to_native(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
+
+ if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('administration_role_arn'):
+ # TODO loosen the semantics here to autodetect the account ID and build the ARN
+ stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
+ if module.params.get('execution_role_name'):
+ stack_params['ExecutionRoleName'] = module.params['execution_role_name']
+
+ result = {}
+
+ if module.check_mode:
+ if state == 'absent' and existing_stack_set:
+ module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
+ elif state == 'absent' and not existing_stack_set:
+ module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
+ elif state == 'present' and not existing_stack_set:
+ module.exit_json(changed=True, msg='New stack set would be created', meta=[])
+ elif state == 'present' and existing_stack_set:
+ new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
+ cfn,
+ module.params['name'],
+ module.params['accounts'],
+ module.params['regions'],
+ )
+ if new_stacks:
+ module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
+ elif unspecified_stacks and module.params.get('purge_stacks'):
+ module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
+ else:
+ # TODO: need to check the template and other settings for correct check mode
+ module.exit_json(changed=False, msg='No changes detected', meta=[])
+
+ changed = False
+ if state == 'present':
+ if not existing_stack_set:
+ # on create this parameter has a different name, and cannot be referenced later in the job log
+ stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
+ changed = True
+ create_stack_set(module, stack_params, cfn)
+ else:
+ stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
+ operation_ids.append(stack_params['OperationId'])
+ if module.params.get('regions'):
+ stack_params['OperationPreferences'] = get_operation_preferences(module)
+ changed |= update_stack_set(module, stack_params, cfn)
+
+ await_stack_set_operation(
+ module,
+ cfn,
+ operation_id=stack_params["OperationId"],
+ stack_set_name=stack_params["StackSetName"],
+ max_wait=module.params.get("wait_timeout"),
+ )
+
+ # now create/update any appropriate stack instances
+ new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
+ cfn,
+ module.params['name'],
+ module.params['accounts'],
+ module.params['regions'],
+ )
+ if new_stack_instances:
+ operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
+ changed = True
+ cfn.create_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=list(set(acct for acct, region in new_stack_instances)),
+ Regions=list(set(region for acct, region in new_stack_instances)),
+ OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ else:
+ # No new instances are needed; push the current template and parameters
+ # out to the stack instances that already exist.
+ operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
+ cfn.update_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=list(set(acct for acct, region in existing_stack_instances)),
+ Regions=list(set(region for acct, region in existing_stack_instances)),
+ OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ for op in operation_ids:
+ await_stack_set_operation(
+ module, cfn, operation_id=op,
+ stack_set_name=module.params['name'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+
+ elif state == 'absent':
+ if not existing_stack_set:
+ module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
+ if module.params.get('purge_stacks') is False:
+ # Stack instance retention is handled below via RetainStacks when the
+ # delete runs into StackSetNotEmptyException.
+ pass
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+ module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
+ except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
+ except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except
+ delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
+ cfn.delete_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=module.params['accounts'],
+ Regions=module.params['regions'],
+ RetainStacks=(not module.params.get('purge_stacks')),
+ OperationId=delete_instances_op
+ )
+ await_stack_set_operation(
+ module, cfn, operation_id=delete_instances_op,
+ stack_set_name=stack_params['StackSetName'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+ except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except
+ # this time, it is likely that either the delete failed or there are more stacks.
+ instances = cfn.list_stack_instances(
+ StackSetName=module.params['name'],
+ )
+ stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
+ module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
+ module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
+
+ result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
+ if any(o['status'] == 'FAILED' for o in result['operations']):
+ module.fail_json(msg="One or more operations failed to execute", **result)
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
new file mode 100644
index 000000000..447fd994e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
@@ -0,0 +1,2272 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+
+version_added: 1.0.0
+module: cloudfront_distribution
+
+short_description: Create, update and delete AWS CloudFront distributions
+
+description:
+ - Allows for easy creation, updating and deletion of CloudFront distributions.
+
+author:
+ - Willem van Ketwich (@wilvk)
+ - Will Thames (@willthames)
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+
+options:
+
+ state:
+ description:
+ - The desired state of the distribution.
+ - I(state=present) creates a new distribution or updates an existing distribution.
+ - I(state=absent) deletes an existing distribution.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ distribution_id:
+ description:
+ - The ID of the CloudFront distribution.
+ - This parameter can be used instead of I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
+ type: str
+
+ e_tag:
+ description:
+ - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
+ - Is determined automatically if not specified.
+ type: str
+
+ caller_reference:
+ description:
+ - A unique identifier for creating and updating CloudFront distributions.
+ - Each caller reference must be unique across all distributions. e.g. a caller reference used in a web
+ distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
+ to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
+ C(YYYY-MM-DDTHH:MM:SS.ffffff).
+ type: str
+
+ alias:
+ description:
+ - The name of an alias (CNAME) that is used in a distribution. Because an alias can only be used by one
+ distribution per AWS account, it effectively identifies that distribution. Providing I(alias) avoids having to
+ provide the I(distribution_id), I(e_tag), or I(caller_reference) of an existing distribution.
+ type: str
+
+ aliases:
+ description:
+ - A list of domain name aliases (CNAMEs) as strings to be used for the distribution.
+ - Each alias must be unique across all distributions for the AWS account.
+ type: list
+ elements: str
+ default: []
+
+ purge_aliases:
+ description:
+ - Specifies whether existing aliases will be removed before adding new aliases.
+ - When I(purge_aliases=true), existing aliases are removed and I(aliases) are added.
+ default: false
+ type: bool
+
+ default_root_object:
+ description:
+ - A config element that specifies the path to request when the user requests the origin.
+ - e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is requested by the user.
+ - This prevents the entire distribution origin from being exposed at the root.
+ type: str
+
+ default_origin_domain_name:
+ description:
+ - The domain name to use for an origin if no I(origins) have been specified.
+ - Should only be used on a first run of generating a distribution and not on
+ subsequent runs.
+ - Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
+ type: str
+
+ default_origin_path:
+ description:
+ - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
+ type: str
+
+ origins:
+ type: list
+ elements: dict
+ description:
+ - A config element that is a list of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
+ suboptions:
+ id:
+ description: A unique identifier for the origin or origin group. I(id) must be unique within the distribution.
+ type: str
+ domain_name:
+ description:
+ - The domain name which CloudFront will query as the origin.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName)
+ type: str
+ origin_path:
+ description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.
+ type: str
+ custom_headers:
+ description:
+ - Custom headers you wish to add to the request before passing it to the origin.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html).
+ type: list
+ elements: dict
+ suboptions:
+ header_name:
+ description: The name of a header that you want CloudFront to forward to your origin.
+ type: str
+ header_value:
+ description: The value for the header that you specified in the I(header_name) field.
+ type: str
+ s3_origin_access_identity_enabled:
+ description:
+ - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront.
+ - Will automatically create an Identity for you if no I(s3_origin_config) is specified.
+ - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html).
+ type: bool
+ s3_origin_config:
+ description: Specify origin access identity for S3 origins.
+ type: dict
+ suboptions:
+ origin_access_identity:
+ description: Existing origin access identity in the format C(origin-access-identity/cloudfront/OID_ID).
+ type: str
+ custom_origin_config:
+ description: Connection information about the origin.
+ type: dict
+ suboptions:
+ http_port:
+ description: The HTTP port the custom origin listens on.
+ type: int
+ https_port:
+ description: The HTTPS port the custom origin listens on.
+ type: int
+ origin_protocol_policy:
+ description: The origin protocol policy to apply to your origin.
+ type: str
+ origin_ssl_protocols:
+ description: A list of SSL/TLS protocols that you want CloudFront to use when communicating to the origin over HTTPS.
+ type: list
+ elements: str
+ origin_read_timeout:
+ description: A timeout (in seconds) when reading from your origin.
+ type: int
+ origin_keepalive_timeout:
+ description: A keep-alive timeout (in seconds).
+ type: int
+
+ purge_origins:
+ description: Whether to remove any origins that aren't listed in I(origins).
+ default: false
+ type: bool
+
+ default_cache_behavior:
+ type: dict
+ description:
+ - A dict specifying the default cache behavior of the distribution.
+ - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid
+ cache_behavior in I(cache_behaviors) with defaults.
+ suboptions:
+ target_origin_id:
+ description:
+ - The ID of the origin that you want CloudFront to route requests to
+ by default.
+ type: str
+ response_headers_policy_id:
+ description:
+ - The ID of the header policy that CloudFront adds to responses that it sends to viewers.
+ type: str
+ forwarded_values:
+ description:
+ - A dict that specifies how CloudFront handles query strings and cookies.
+ type: dict
+ suboptions:
+ query_string:
+ description:
+ - Indicates whether you want CloudFront to forward query strings
+ to the origin that is associated with this cache behavior.
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers, use a list containing a single element '*' (C(['*'])).
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(target_origin_id).
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+ cache_behaviors:
+ type: list
+ elements: dict
+ description:
+ - A list of dictionaries describing the cache behaviors for the distribution.
+ - The order of the list is preserved across runs unless I(purge_cache_behaviors) is enabled.
+ suboptions:
+ path_pattern:
+ description:
+ - The pattern that specifies which requests to apply the behavior to.
+ type: str
+ target_origin_id:
+ description:
+ - The ID of the origin that you want CloudFront to route requests to
+ by default.
+ type: str
+ response_headers_policy_id:
+ description:
+ - The ID of the header policy that CloudFront adds to responses that it sends to viewers.
+ type: str
+ forwarded_values:
+ description:
+ - A dict that specifies how CloudFront handles query strings and cookies.
+ type: dict
+ suboptions:
+ query_string:
+ description:
+ - Indicates whether you want CloudFront to forward query strings
+ to the origin that is associated with this cache behavior.
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers, use a list containing a single element '*' (C(['*'])).
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(path_pattern) and I(target_origin_id).
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id) when a request matches I(path_pattern).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+
+ purge_cache_behaviors:
+ description:
+ - Whether to remove any cache behaviors that aren't listed in I(cache_behaviors).
+ - This switch also allows the reordering of I(cache_behaviors).
+ default: false
+ type: bool
+
+ custom_error_responses:
+ type: list
+ elements: dict
+ description:
+ - A config element that is a list of complex custom error responses to be specified for the distribution.
+ - This attribute configures custom http error messages returned to the user.
+ suboptions:
+ error_code:
+ type: int
+ description: The error code the custom error page is for.
+ error_caching_min_ttl:
+ type: int
+ description: The length of time (in seconds) that CloudFront will cache status codes for.
+ response_code:
+ type: int
+ description:
+ - The HTTP status code that CloudFront should return to a user when the origin returns the HTTP status code specified by I(error_code).
+ response_page_path:
+ type: str
+ description:
+ - The path to the custom error page that you want CloudFront to return to a viewer when your origin returns
+ the HTTP status code specified by I(error_code).
+
+ purge_custom_error_responses:
+ description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses).
+ default: false
+ type: bool
+
+ comment:
+ description:
+ - A comment that describes the CloudFront distribution.
+ - If not specified, it defaults to a generic message that it has been created with Ansible, and a datetime stamp.
+ type: str
+
+ logging:
+ description:
+ - A config element that is a complex object that defines logging for the distribution.
+ suboptions:
+ enabled:
+ description: When I(enabled=true) CloudFront will log access to an S3 bucket.
+ type: bool
+ include_cookies:
+ description: When I(include_cookies=true) CloudFront will include cookies in the logs.
+ type: bool
+ bucket:
+ description: The S3 bucket to store the log in.
+ type: str
+ prefix:
+ description: A prefix to include in the S3 object names.
+ type: str
+ type: dict
+
+ price_class:
+ description:
+ - A string that specifies the pricing class of the distribution, as per
+ U(https://aws.amazon.com/cloudfront/pricing/).
+ - I(price_class=PriceClass_100) consists of the areas United States, Canada and Europe.
+ - I(price_class=PriceClass_200) consists of the areas United States, Canada, Europe, Japan, India,
+ Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - I(price_class=PriceClass_All) consists of the areas United States, Canada, Europe, Japan, India,
+ South America, Australia, Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - AWS defaults this to C(PriceClass_All).
+ - Valid values are C(PriceClass_100), C(PriceClass_200) and C(PriceClass_All)
+ type: str
+
+ enabled:
+ description:
+ - A boolean value that specifies whether the distribution is enabled or disabled.
+ - Defaults to C(false).
+ type: bool
+
+ viewer_certificate:
+ type: dict
+ description:
+ - A dict that specifies the encryption details of the distribution.
+ suboptions:
+ cloudfront_default_certificate:
+ type: bool
+ description:
+ - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net)
+ you should set I(cloudfront_default_certificate=true).
+ - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method).
+ iam_certificate_id:
+ type: str
+ description:
+ - The ID of a certificate stored in IAM to use for HTTPS connections.
+ - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method).
+ acm_certificate_arn:
+ type: str
+ description:
+ - The ARN of a certificate stored in ACM to use for HTTPS connections.
+ - If I(acm_certificate_arn) is set then you must also specify I(ssl_support_method).
+ ssl_support_method:
+ type: str
+ description:
+ - How CloudFront should serve SSL certificates.
+ - Valid values are C(sni-only) for SNI, and C(vip) if CloudFront is configured to use a dedicated IP for your content.
+ minimum_protocol_version:
+ type: str
+ description:
+ - The security policy that you want CloudFront to use for HTTPS connections.
+ - See U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html)
+ for supported security policies.
+
+ restrictions:
+ type: dict
+ description:
+ - A config element that is a complex object that describes how a distribution should restrict its content.
+ suboptions:
+ geo_restriction:
+ description: Apply a restriction based on the location of the requester.
+ type: dict
+ suboptions:
+ restriction_type:
+ type: str
+ description:
+ - The method that you want to use to restrict distribution of your content by country.
+ - Valid values are C(none), C(whitelist), C(blacklist).
+ items:
+ description:
+ - A list of ISO 3166-1 two letter (Alpha 2) country codes that the
+ restriction should apply to.
+ - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/).'
+ type: list
+ elements: str
+
+ web_acl_id:
+ description:
+ - The ID of a Web Application Firewall (WAF) Access Control List (ACL).
+ type: str
+
+ http_version:
+ description:
+ - The version of the http protocol to use for the distribution.
+ - AWS defaults this to C(http2).
+ - Valid values are C(http1.1) and C(http2).
+ type: str
+
+ ipv6_enabled:
+ description:
+ - Determines whether IPv6 support is enabled or not.
+ - Defaults to C(false).
+ type: bool
+
+ wait:
+ description:
+ - Specifies whether the module waits until the distribution has completed processing the creation or update.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Specifies the duration in seconds to wait for a timeout of a cloudfront create or update.
+ default: 1800
+ type: int
+
+'''
+
+EXAMPLES = r'''
+- name: create a basic distribution with defaults and tags
+ community.aws.cloudfront_distribution:
+ state: present
+ default_origin_domain_name: www.my-cloudfront-origin.com
+ tags:
+ Name: example distribution
+ Project: example project
+ Priority: '1'
+
+- name: update a distribution comment by distribution_id
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E1RP5A2MJ8073O
+ comment: modified by ansible cloudfront.py
+
+- name: update a distribution comment by caller_reference
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: my cloudfront distribution 001
+ comment: modified by ansible cloudfront.py
+
+- name: update a distribution's aliases and comment using the distribution_id as a reference
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E1RP5A2MJ8073O
+ comment: modified by cloudfront.py again
+ aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
+
+- name: update a distribution's aliases and comment using an alias as a reference
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: my test distribution
+ comment: modified by cloudfront.py again
+ aliases:
+ - www.my-distribution-source.com
+ - zzz.aaa.io
+
+- name: update a distribution's comment and aliases and tags and remove existing tags
+ community.aws.cloudfront_distribution:
+ state: present
+ distribution_id: E15BU8SDCGSG57
+ comment: modified by cloudfront.py again
+ aliases:
+ - tested.com
+ tags:
+ Project: distribution 1.2
+ purge_tags: true
+
+- name: create a distribution with an origin, logging and default cache behavior
+ community.aws.cloudfront_distribution:
+ state: present
+ caller_reference: unique test distribution ID
+ origins:
+ - id: 'my test origin-000111'
+ domain_name: www.example.com
+ origin_path: /production
+ custom_headers:
+ - header_name: MyCustomHeaderName
+ header_value: MyCustomHeaderValue
+ default_cache_behavior:
+ target_origin_id: 'my test origin-000111'
+ forwarded_values:
+ query_string: true
+ cookies:
+ forward: all
+ headers:
+ - '*'
+ viewer_protocol_policy: allow-all
+ smooth_streaming: true
+ compress: true
+ allowed_methods:
+ items:
+ - GET
+ - HEAD
+ cached_methods:
+ - GET
+ - HEAD
+ logging:
+ enabled: true
+ include_cookies: false
+ bucket: mylogbucket.s3.amazonaws.com
+ prefix: myprefix/
+ enabled: false
+ comment: this is a CloudFront distribution with logging
+
+- name: delete a distribution
+ community.aws.cloudfront_distribution:
+ state: absent
+ caller_reference: replaceable distribution
+'''
+
+RETURN = r'''
+active_trusted_signers:
+ description: Key pair IDs that CloudFront is aware of for each trusted signer.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are in use.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Number of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of key pair IDs of the trusted signers.
+ returned: when there are trusted signers
+ type: list
+ sample:
+ - key_pair_id
+aliases:
+ description: Aliases that refer to the distribution.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of aliases.
+ returned: always
+ type: list
+ sample:
+ - test.example.com
+ quantity:
+ description: Number of aliases.
+ returned: always
+ type: int
+ sample: 1
+arn:
+ description: Amazon Resource Name of the distribution.
+ returned: always
+ type: str
+ sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
+cache_behaviors:
+ description: CloudFront cache behaviors.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cache behaviors.
+ returned: always
+ type: complex
+ contains:
+ allowed_methods:
+ description: Methods allowed by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cached_methods:
+ description: Methods cached by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cached methods.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of cached methods.
+ returned: always
+ type: int
+ sample: 2
+ items:
+ description: List of methods allowed by the cache behavior.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of methods allowed by the cache behavior.
+ returned: always
+ type: int
+ sample: 2
+ compress:
+ description: Whether compression is turned on for the cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ default_ttl:
+ description: Default Time to Live of the cache behavior.
+ returned: always
+ type: int
+ sample: 86400
+ forwarded_values:
+ description: Values forwarded to the origin for this cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cookies:
+ description: Cookies to forward to the origin.
+ returned: always
+ type: complex
+ contains:
+ forward:
+ description: Which cookies to forward to the origin for this cache behavior.
+ returned: always
+ type: str
+ sample: none
+ whitelisted_names:
+ description: The names of the cookies to forward to the origin for this cache behavior.
+ returned: when I(forward=whitelist)
+ type: complex
+ contains:
+ quantity:
+ description: Count of cookies to forward.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of cookies to forward.
+ returned: when list is not empty
+ type: list
+ sample: my_cookie
+ headers:
+ description: Which headers are used to vary on cache retrievals.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers to vary on.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of headers to vary on.
+ returned: when list is not empty
+ type: list
+ sample:
+ - Host
+ query_string:
+ description: Whether the query string is used in cache lookups.
+ returned: always
+ type: bool
+ sample: false
+ query_string_cache_keys:
+ description: Which query string keys to use in cache lookups.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of query string cache keys to use in cache lookups.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of query string cache keys to use in cache lookups.
+ returned: when list is not empty
+ type: list
+ sample:
+ lambda_function_associations:
+ description: Lambda function associations for a cache behavior.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of lambda function associations.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of lambda function associations.
+ returned: when list is not empty
+ type: list
+ sample:
+ - lambda_function_arn: arn:aws:lambda:us-east-1:123456789012:function:lambda-function
+ event_type: viewer-response
+ max_ttl:
+ description: Maximum Time to Live.
+ returned: always
+ type: int
+ sample: 31536000
+ min_ttl:
+ description: Minimum Time to Live.
+ returned: always
+ type: int
+ sample: 0
+ path_pattern:
+ description: Path pattern that determines this cache behavior.
+ returned: always
+ type: str
+ sample: /path/to/files/*
+ smooth_streaming:
+ description: Whether smooth streaming is enabled.
+ returned: always
+ type: bool
+ sample: false
+ target_origin_id:
+ description: ID of the origin referenced by this cache behavior.
+ returned: always
+ type: str
+ sample: origin_abcd
+ trusted_signers:
+ description: Trusted signers.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are enabled for this cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Count of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ viewer_protocol_policy:
+ description: Policy of how to handle http/https.
+ returned: always
+ type: str
+ sample: redirect-to-https
+ quantity:
+ description: Count of cache behaviors.
+ returned: always
+ type: int
+ sample: 1
+
+caller_reference:
+ description: Idempotency reference given when creating CloudFront distribution.
+ returned: always
+ type: str
+ sample: '1484796016700'
+comment:
+ description: Any comments you want to include about the distribution.
+ returned: always
+ type: str
+ sample: 'my first CloudFront distribution'
+custom_error_responses:
+ description: Custom error responses to use for error handling.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of custom error responses.
+ returned: always
+ type: complex
+ contains:
+ error_caching_min_ttl:
+ description: Minimum time to cache this error response.
+ returned: always
+ type: int
+ sample: 300
+ error_code:
+ description: Origin response code that triggers this error response.
+ returned: always
+ type: int
+ sample: 500
+ response_code:
+ description: Response code to return to the requester.
+ returned: always
+ type: str
+ sample: '500'
+ response_page_path:
+ description: Path that contains the error page to display.
+ returned: always
+ type: str
+ sample: /errors/5xx.html
+ quantity:
+ description: Count of custom error response items.
+ returned: always
+ type: int
+ sample: 1
+default_cache_behavior:
+ description: Default cache behavior.
+ returned: always
+ type: complex
+ contains:
+ allowed_methods:
+ description: Methods allowed by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cached_methods:
+ description: Methods cached by the cache behavior.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of cached methods.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of cached methods.
+ returned: always
+ type: int
+ sample: 2
+ items:
+ description: List of methods allowed by the cache behavior.
+ returned: always
+ type: list
+ sample:
+ - HEAD
+ - GET
+ quantity:
+ description: Count of methods allowed by the cache behavior.
+ returned: always
+ type: int
+ sample: 2
+ compress:
+ description: Whether compression is turned on for the cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ default_ttl:
+ description: Default Time to Live of the cache behavior.
+ returned: always
+ type: int
+ sample: 86400
+ forwarded_values:
+ description: Values forwarded to the origin for this cache behavior.
+ returned: always
+ type: complex
+ contains:
+ cookies:
+ description: Cookies to forward to the origin.
+ returned: always
+ type: complex
+ contains:
+ forward:
+ description: Which cookies to forward to the origin for this cache behavior.
+ returned: always
+ type: str
+ sample: none
+ whitelisted_names:
+ description: The names of the cookies to forward to the origin for this cache behavior.
+ returned: when I(forward=whitelist)
+ type: complex
+ contains:
+ quantity:
+ description: Count of cookies to forward.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of cookies to forward.
+ returned: when list is not empty
+ type: list
+ sample: my_cookie
+ headers:
+ description: Which headers are used to vary on cache retrievals.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers to vary on.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of headers to vary on.
+ returned: when list is not empty
+ type: list
+ sample:
+ - Host
+ query_string:
+ description: Whether the query string is used in cache lookups.
+ returned: always
+ type: bool
+ sample: false
+ query_string_cache_keys:
+ description: Which query string keys to use in cache lookups.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of query string cache keys to use in cache lookups.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of query string cache keys to use in cache lookups.
+ returned: when list is not empty
+ type: list
+ sample:
+ lambda_function_associations:
+ description: Lambda function associations for a cache behavior.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of lambda function associations.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of lambda function associations.
+ returned: when list is not empty
+ type: list
+ sample:
+ - lambda_function_arn: arn:aws:lambda:us-east-1:123456789012:function:lambda-function
+ event_type: viewer-response
+ max_ttl:
+ description: Maximum Time to Live.
+ returned: always
+ type: int
+ sample: 31536000
+ min_ttl:
+ description: Minimum Time to Live.
+ returned: always
+ type: int
+ sample: 0
+ path_pattern:
+ description: Path pattern that determines this cache behavior.
+ returned: always
+ type: str
+ sample: /path/to/files/*
+ smooth_streaming:
+ description: Whether smooth streaming is enabled.
+ returned: always
+ type: bool
+ sample: false
+ target_origin_id:
+ description: ID of the origin referenced by this cache behavior.
+ returned: always
+ type: str
+ sample: origin_abcd
+ trusted_signers:
+ description: Trusted signers.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether trusted signers are enabled for this cache behavior.
+ returned: always
+ type: bool
+ sample: false
+ quantity:
+ description: Count of trusted signers.
+ returned: always
+ type: int
+ sample: 1
+ viewer_protocol_policy:
+ description: Policy of how to handle http/https.
+ returned: always
+ type: str
+ sample: redirect-to-https
+default_root_object:
+ description: The object that you want CloudFront to request from your origin (for example, index.html)
+ when a viewer requests the root URL for your distribution.
+ returned: always
+ type: str
+ sample: ''
+diff:
+ description: Difference between previous configuration and new configuration.
+ returned: always
+ type: dict
+ sample: {}
+domain_name:
+ description: Domain name of CloudFront distribution.
+ returned: always
+ type: str
+ sample: d1vz8pzgurxosf.cloudfront.net
+enabled:
+ description: Whether the CloudFront distribution is enabled or not.
+ returned: always
+ type: bool
+ sample: true
+http_version:
+ description: Version of HTTP supported by the distribution.
+ returned: always
+ type: str
+ sample: http2
+id:
+ description: CloudFront distribution ID.
+ returned: always
+ type: str
+ sample: E123456ABCDEFG
+in_progress_invalidation_batches:
+ description: The number of invalidation batches currently in progress.
+ returned: always
+ type: int
+ sample: 0
+is_ipv6_enabled:
+ description: Whether IPv6 is enabled.
+ returned: always
+ type: bool
+ sample: true
+last_modified_time:
+ description: Date and time distribution was last modified.
+ returned: always
+ type: str
+ sample: '2017-10-13T01:51:12.656000+00:00'
+logging:
+ description: Logging information.
+ returned: always
+ type: complex
+ contains:
+ bucket:
+ description: S3 bucket logging destination.
+ returned: always
+ type: str
+ sample: logs-example-com.s3.amazonaws.com
+ enabled:
+ description: Whether logging is enabled.
+ returned: always
+ type: bool
+ sample: true
+ include_cookies:
+ description: Whether to log cookies.
+ returned: always
+ type: bool
+ sample: false
+ prefix:
+ description: Prefix added to logging object names.
+ returned: always
+ type: str
+ sample: cloudfront/test
+origins:
+ description: Origins in the CloudFront distribution.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of origins.
+ returned: always
+ type: complex
+ contains:
+ custom_headers:
+ description: Custom headers passed to the origin.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of headers.
+ returned: always
+ type: int
+ sample: 1
+ custom_origin_config:
+ description: Configuration of the origin.
+ returned: always
+ type: complex
+ contains:
+ http_port:
+ description: Port on which HTTP is listening.
+ returned: always
+ type: int
+ sample: 80
+ https_port:
+ description: Port on which HTTPS is listening.
+ returned: always
+ type: int
+ sample: 443
+ origin_keepalive_timeout:
+ description: Keep-alive timeout.
+ returned: always
+ type: int
+ sample: 5
+ origin_protocol_policy:
+ description: Policy of which protocols are supported.
+ returned: always
+ type: str
+ sample: https-only
+ origin_read_timeout:
+ description: Timeout for reads to the origin.
+ returned: always
+ type: int
+ sample: 30
+ origin_ssl_protocols:
+ description: SSL protocols allowed by the origin.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: List of SSL protocols.
+ returned: always
+ type: list
+ sample:
+ - TLSv1
+ - TLSv1.1
+ - TLSv1.2
+ quantity:
+ description: Count of SSL protocols.
+ returned: always
+ type: int
+ sample: 3
+ domain_name:
+ description: Domain name of the origin.
+ returned: always
+ type: str
+ sample: test-origin.example.com
+ id:
+ description: ID of the origin.
+ returned: always
+ type: str
+ sample: test-origin.example.com
+ origin_path:
+ description: Subdirectory to prefix the request from the S3 or HTTP origin.
+ returned: always
+ type: str
+ sample: ''
+ s3_origin_config:
+ description: Origin access identity configuration for S3 Origin.
+ returned: when s3_origin_access_identity_enabled is true
+ type: dict
+ contains:
+ origin_access_identity:
+ type: str
+ description: The origin access id as a path.
+ sample: origin-access-identity/cloudfront/EXAMPLEID
+ quantity:
+ description: Count of origins.
+ returned: always
+ type: int
+ sample: 1
+price_class:
+ description: Price class of CloudFront distribution.
+ returned: always
+ type: str
+ sample: PriceClass_All
+restrictions:
+ description: Restrictions in use by CloudFront.
+ returned: always
+ type: complex
+ contains:
+ geo_restriction:
+ description: Controls the countries in which your content is distributed.
+ returned: always
+ type: complex
+ contains:
+ quantity:
+ description: Count of restrictions.
+ returned: always
+ type: int
+ sample: 1
+ items:
+ description: List of country codes allowed or disallowed.
+ returned: always
+ type: list
+ sample: xy
+ restriction_type:
+ description: Type of restriction.
+ returned: always
+ type: str
+ sample: blacklist
+status:
+ description: Status of the CloudFront distribution.
+ returned: always
+ type: str
+ sample: InProgress
+tags:
+ description: Distribution tags.
+ returned: always
+ type: dict
+ sample:
+ Hello: World
+viewer_certificate:
+ description: Certificate used by CloudFront distribution.
+ returned: always
+ type: complex
+ contains:
+ acm_certificate_arn:
+ description: ARN of ACM certificate.
+ returned: when certificate comes from ACM
+ type: str
+ sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
+ certificate:
+ description: Reference to certificate.
+ returned: always
+ type: str
+ sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
+ certificate_source:
+ description: Where certificate comes from.
+ returned: always
+ type: str
+ sample: acm
+ minimum_protocol_version:
+ description: Minimum SSL/TLS protocol supported by this distribution.
+ returned: always
+ type: str
+ sample: TLSv1
+ ssl_support_method:
+ description: Whether pre-SNI browsers are supported.
+ returned: always
+ type: str
+ sample: sni-only
+web_acl_id:
+ description: ID of Web Access Control List (from WAF service).
+ returned: always
+ type: str
+ sample: abcd1234-1234-abcd-abcd-abcd12345678
+'''
+
+from ansible.module_utils._text import to_text, to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+import datetime
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def change_dict_key_name(dictionary, old_key, new_key):
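+ # Renames a key in place; a minimal sketch with hypothetical values:
+ #   change_dict_key_name({'http_port': 80}, 'http_port', 'h_t_t_p_port')
+ #   returns {'h_t_t_p_port': 80}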
+ if old_key in dictionary:
+ dictionary[new_key] = dictionary.get(old_key)
+ dictionary.pop(old_key, None)
+ return dictionary
+
+
+def merge_validation_into_config(config, validated_node, node_name):
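+ # Merges a validated sub-tree into the config; a sketch with hypothetical values:
+ #   merge_validation_into_config({'a': {'x': 1}}, {'y': 2}, 'a')
+ #   returns {'a': {'x': 1, 'y': 2}} - validated keys win; lists are unioned instead.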
+ if validated_node is not None:
+ if isinstance(validated_node, dict):
+ config_node = config.get(node_name)
+ if config_node is not None:
+ config_node_items = list(config_node.items())
+ else:
+ config_node_items = []
+ config[node_name] = dict(config_node_items + list(validated_node.items()))
+ if isinstance(validated_node, list):
+ config[node_name] = list(set(config.get(node_name) + validated_node))
+ return config
+
+
+def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
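+ # Wraps a plain list in the {'quantity': N, 'items': [...]} shape CloudFront expects,
+ # e.g. ['TLSv1.2'] becomes {'quantity': 1, 'items': ['TLSv1.2']};
+ # an empty list yields {'quantity': 0} with no 'items' key.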
+ if list_items is None:
+ list_items = []
+ if not isinstance(list_items, list):
+ raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
+ result = {}
+ if include_quantity:
+ result['quantity'] = len(list_items)
+ if len(list_items) > 0:
+ result['items'] = list_items
+ return result
+
+
+def create_distribution(client, module, config, tags):
+ try:
+ if not tags:
+ return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution']
+ else:
+ distribution_config_with_tags = {
+ 'DistributionConfig': config,
+ 'Tags': {
+ 'Items': tags
+ }
+ }
+ return client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error creating distribution")
+
+
+def delete_distribution(client, module, distribution):
+ try:
+ return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
+
+
+def update_distribution(client, module, config, distribution_id, e_tag):
+ try:
+ return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
+
+
+def tag_resource(client, module, arn, tags):
+ try:
+ return client.tag_resource(aws_retry=True, Resource=arn, Tags=dict(Items=tags))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error tagging resource")
+
+
+def untag_resource(client, module, arn, tag_keys):
+ try:
+ return client.untag_resource(aws_retry=True, Resource=arn, TagKeys=dict(Items=tag_keys))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error untagging resource")
+
+
+def list_tags_for_resource(client, module, arn):
+ try:
+ response = client.list_tags_for_resource(aws_retry=True, Resource=arn)
+ return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error listing tags for resource")
+
+
+def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
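+ # compare_aws_tags(existing, desired, purge) returns a pair of
+ # (tags to set, tag keys to unset); with purge_tags=False, existing tags
+ # missing from valid_tags are left in place rather than removed.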
+ if valid_tags is None:
+ return False
+ changed = False
+ to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
+ if to_remove:
+ untag_resource(client, module, arn, to_remove)
+ changed = True
+ if to_add:
+ tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
+ changed = True
+ return changed
+
+
+class CloudFrontValidationManager(object):
+ """
+ Manages CloudFront validations
+ """
+
+ def __init__(self, module):
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+ self.module = module
+ self.__default_distribution_enabled = True
+ self.__default_http_port = 80
+ self.__default_https_port = 443
+ self.__default_ipv6_enabled = False
+ self.__default_origin_ssl_protocols = [
+ 'TLSv1',
+ 'TLSv1.1',
+ 'TLSv1.2'
+ ]
+ self.__default_custom_origin_protocol_policy = 'match-viewer'
+ self.__default_custom_origin_read_timeout = 30
+ self.__default_custom_origin_keepalive_timeout = 5
+ self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ self.__default_cache_behavior_min_ttl = 0
+ self.__default_cache_behavior_max_ttl = 31536000
+ self.__default_cache_behavior_default_ttl = 86400
+ self.__default_cache_behavior_compress = False
+ self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
+ self.__default_cache_behavior_smooth_streaming = False
+ self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
+ self.__default_cache_behavior_forwarded_values_query_string = True
+ self.__default_trusted_signers_enabled = False
+ self.__valid_price_classes = set([
+ 'PriceClass_100',
+ 'PriceClass_200',
+ 'PriceClass_All'
+ ])
+ self.__valid_origin_protocol_policies = set([
+ 'http-only',
+ 'match-viewer',
+ 'https-only'
+ ])
+ self.__valid_origin_ssl_protocols = set([
+ 'SSLv3',
+ 'TLSv1',
+ 'TLSv1.1',
+ 'TLSv1.2'
+ ])
+ self.__valid_cookie_forwarding = set([
+ 'none',
+ 'whitelist',
+ 'all'
+ ])
+ self.__valid_viewer_protocol_policies = set([
+ 'allow-all',
+ 'https-only',
+ 'redirect-to-https'
+ ])
+ self.__valid_methods = set([
+ 'GET',
+ 'HEAD',
+ 'POST',
+ 'PUT',
+ 'PATCH',
+ 'OPTIONS',
+ 'DELETE'
+ ])
+ self.__valid_methods_cached_methods = [
+ set([
+ 'GET',
+ 'HEAD'
+ ]),
+ set([
+ 'GET',
+ 'HEAD',
+ 'OPTIONS'
+ ])
+ ]
+ self.__valid_methods_allowed_methods = [
+ self.__valid_methods_cached_methods[0],
+ self.__valid_methods_cached_methods[1],
+ self.__valid_methods
+ ]
+ self.__valid_lambda_function_association_event_types = set([
+ 'viewer-request',
+ 'viewer-response',
+ 'origin-request',
+ 'origin-response'
+ ])
+ self.__valid_viewer_certificate_ssl_support_methods = set([
+ 'sni-only',
+ 'vip'
+ ])
+ self.__valid_viewer_certificate_minimum_protocol_versions = set([
+ 'SSLv3',
+ 'TLSv1',
+ 'TLSv1_2016',
+ 'TLSv1.1_2016',
+ 'TLSv1.2_2018',
+ 'TLSv1.2_2019',
+ 'TLSv1.2_2021'
+ ])
+ self.__valid_viewer_certificate_certificate_sources = set([
+ 'cloudfront',
+ 'iam',
+ 'acm'
+ ])
+ self.__valid_http_versions = set([
+ 'http1.1',
+ 'http2'
+ ])
+ self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
+
+ def add_missing_key(self, dict_object, key_to_set, value_to_set):
+ if key_to_set not in dict_object and value_to_set is not None:
+ dict_object[key_to_set] = value_to_set
+ return dict_object
+
+ def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
+ if old_key not in dict_object and value_to_set is not None:
+ dict_object[new_key] = value_to_set
+ else:
+ dict_object = change_dict_key_name(dict_object, old_key, new_key)
+ return dict_object
+
+ def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
+ if key_name in dict_object:
+ self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
+ else:
+ if to_aws_list:
+ dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
+ elif value_to_set is not None:
+ dict_object[key_name] = value_to_set
+ return dict_object
+
+ def validate_logging(self, logging):
+ try:
+ if logging is None:
+ return None
+ valid_logging = {}
+ if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
+ self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
+ valid_logging['include_cookies'] = logging.get('include_cookies')
+ valid_logging['enabled'] = logging.get('enabled')
+ valid_logging['bucket'] = logging.get('bucket')
+ valid_logging['prefix'] = logging.get('prefix')
+ return valid_logging
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution logging")
+
+ def validate_is_list(self, list_to_validate, list_name):
+ if not isinstance(list_to_validate, list):
+ self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
+
+ def validate_required_key(self, key_name, full_key_name, dict_object):
+ if key_name not in dict_object:
+ self.module.fail_json(msg="%s must be specified." % full_key_name)
+
+ def validate_origins(self, client, config, origins, default_origin_domain_name,
+ default_origin_path, create_distribution, purge_origins=False):
+ try:
+ if origins is None:
+ if default_origin_domain_name is None and not create_distribution:
+ if purge_origins:
+ return None
+ else:
+ return ansible_list_to_cloudfront_list(config)
+ if default_origin_domain_name is not None:
+ origins = [{
+ 'domain_name': default_origin_domain_name,
+ 'origin_path': default_origin_path or ''
+ }]
+ else:
+ origins = []
+ self.validate_is_list(origins, 'origins')
+ if not origins and default_origin_domain_name is None and create_distribution:
+ self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
+ all_origins = OrderedDict()
+ new_domains = list()
+ for origin in config:
+ all_origins[origin.get('domain_name')] = origin
+ for origin in origins:
+ origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
+ all_origins[origin['domain_name']] = origin
+ new_domains.append(origin['domain_name'])
+ if purge_origins:
+ for domain in list(all_origins.keys()):
+ if domain not in new_domains:
+ del all_origins[domain]
+ return ansible_list_to_cloudfront_list(list(all_origins.values()))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution origins")
+
+ def validate_s3_origin_configuration(self, client, existing_config, origin):
+ if origin.get('s3_origin_config', {}).get('origin_access_identity'):
+ return origin['s3_origin_config']['origin_access_identity']
+
+ if existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
+ return existing_config['s3_origin_config']['origin_access_identity']
+
+ try:
+ comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+ caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+ cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
+ Comment=comment))
+ oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
+ return "origin-access-identity/cloudfront/%s" % oai
+
+ def validate_origin(self, client, existing_config, origin, default_origin_path):
+ try:
+ origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
+ self.validate_required_key('origin_path', 'origins[].origin_path', origin)
+ origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
+ if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
+ for custom_header in origin.get('custom_headers'):
+ if 'header_name' not in custom_header or 'header_value' not in custom_header:
+ self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
+ origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
+ else:
+ origin['custom_headers'] = ansible_list_to_cloudfront_list()
+ if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
+ if origin.get("s3_origin_access_identity_enabled") is not None:
+ if origin['s3_origin_access_identity_enabled']:
+ s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
+ else:
+ s3_origin_config = None
+
+ del origin["s3_origin_access_identity_enabled"]
+
+ if s3_origin_config:
+ oai = s3_origin_config
+ else:
+ oai = ""
+
+ origin["s3_origin_config"] = dict(origin_access_identity=oai)
+
+ if 'custom_origin_config' in origin:
+ self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
+ else:
+ origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
+ custom_origin_config = origin.get('custom_origin_config')
+ custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
+ 'origins[].custom_origin_config.origin_protocol_policy',
+ self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
+ custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
+ custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
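+ # 'h_t_t_p_port'/'h_t_t_p_s_port' look odd, but snake_dict_to_camel_dict()
+ # in main() turns them back into the 'HTTPPort'/'HTTPSPort' keys the
+ # CloudFront API expects.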
+ custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
+ custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
+ if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
+ custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
+ if custom_origin_config.get('origin_ssl_protocols'):
+ self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
+ self.__valid_origin_ssl_protocols)
+ else:
+ custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
+ custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
+ return origin
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution origin")
+
+ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
+ try:
+ if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
+ return ansible_list_to_cloudfront_list(config)
+ all_cache_behaviors = OrderedDict()
+ # cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors
+ # is true (if purge_cache_behaviors is not true, we can't really know the full new order)
+ if not purge_cache_behaviors:
+ for behavior in config:
+ all_cache_behaviors[behavior['path_pattern']] = behavior
+ for cache_behavior in cache_behaviors:
+ valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
+ cache_behavior, valid_origins)
+ all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
+ if purge_cache_behaviors:
+ for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
+ del all_cache_behaviors[target_origin_id]
+ return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
+
+ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
+ if is_default_cache and cache_behavior is None:
+ cache_behavior = {}
+ if cache_behavior is None and valid_origins is not None:
+ return config
+ cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
+ cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
+ cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
+ cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
+ cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
+ cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior)
+ return cache_behavior
+
+ def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
+ try:
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
+ config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
+ config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
+ cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
+ config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
+ cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
+ target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
+ if not target_origin_id:
+ target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
+ if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
+ if is_default_cache:
+ cache_behavior_name = 'Default cache behavior'
+ else:
+ cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
+ self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
+ cache_behavior_name)
+ cache_behavior['target_origin_id'] = target_origin_id
+ cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
+ config.get('viewer_protocol_policy',
+ self.__default_cache_behavior_viewer_protocol_policy),
+ self.__valid_viewer_protocol_policies)
+ cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
+ config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
+
+ def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
+ try:
+ if not forwarded_values:
+ forwarded_values = dict()
+ existing_config = config.get('forwarded_values', {})
+ headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
+ if headers:
+ headers.sort()
+ forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
+ if 'cookies' not in forwarded_values:
+ forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
+ forwarded_values['cookies'] = {'forward': forward}
+ else:
+ existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
+ whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
+ if whitelist:
+ self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
+ forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
+ cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
+ self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
+ self.__valid_cookie_forwarding)
+ forwarded_values['cookies']['forward'] = cookie_forwarding
+ query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
+ self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
+ forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
+ forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
+ existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
+ cache_behavior['forwarded_values'] = forwarded_values
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating forwarded values")
+
+ def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
+ try:
+ if lambda_function_associations is not None:
+ self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
+ for association in lambda_function_associations:
+ association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
+ self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
+ self.__valid_lambda_function_association_event_types)
+ cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
+ else:
+ if 'lambda_function_associations' in config:
+ cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
+ else:
+ cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating lambda function associations")
+
+ def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
+ if field_level_encryption_id is not None:
+ cache_behavior['field_level_encryption_id'] = field_level_encryption_id
+ elif 'field_level_encryption_id' in config:
+ cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
+ else:
+ cache_behavior['field_level_encryption_id'] = ""
+ return cache_behavior
+
+ def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
+ try:
+ if allowed_methods is not None:
+ self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
+ temp_allowed_items = allowed_methods.get('items')
+ self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
+ self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
+ self.__valid_methods_allowed_methods)
+ cached_items = allowed_methods.get('cached_methods')
+ if 'cached_methods' in allowed_methods:
+ self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
+ self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_methods.cached_methods[]',
+ self.__valid_methods_cached_methods)
+ # we don't care if the order of how cloudfront stores the methods differs - preserving existing
+ # order reduces likelihood of making unnecessary changes
+ if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
+ cache_behavior['allowed_methods'] = config['allowed_methods']
+ else:
+ cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
+
+ if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
+ cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
+ else:
+ cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
+ else:
+ if 'allowed_methods' in config:
+ cache_behavior['allowed_methods'] = config.get('allowed_methods')
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating allowed methods")
+
+ def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
+ try:
+ if trusted_signers is None:
+ trusted_signers = {}
+ if 'items' in trusted_signers:
+ valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
+ else:
+ valid_trusted_signers = dict(quantity=config.get('quantity', 0))
+ if 'items' in config:
+ valid_trusted_signers = dict(items=config['items'])
+ valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
+ cache_behavior['trusted_signers'] = valid_trusted_signers
+ return cache_behavior
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating trusted signers")
+
+ def validate_viewer_certificate(self, viewer_certificate):
+ try:
+ if viewer_certificate is None:
+ return None
+ if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
+ self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
+ "_certificate set to true.")
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
+ self.__valid_viewer_certificate_ssl_support_methods)
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
+ self.__valid_viewer_certificate_minimum_protocol_versions)
+ self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
+ self.__valid_viewer_certificate_certificate_sources)
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
+ viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
+ return viewer_certificate
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating viewer certificate")
+
+ def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
+ try:
+ if custom_error_responses is None and not purge_custom_error_responses:
+ return ansible_list_to_cloudfront_list(config)
+ self.validate_is_list(custom_error_responses, 'custom_error_responses')
+ result = list()
+ # key the existing (config) responses by error_code so new entries can replace them
+ existing_responses = dict((response['error_code'], response) for response in config)
+ for custom_error_response in custom_error_responses:
+ self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
+ custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
+ if 'response_code' in custom_error_response:
+ custom_error_response['response_code'] = str(custom_error_response['response_code'])
+ if custom_error_response['error_code'] in existing_responses:
+ del existing_responses[custom_error_response['error_code']]
+ result.append(custom_error_response)
+ if not purge_custom_error_responses:
+ result.extend(existing_responses.values())
+
+ return ansible_list_to_cloudfront_list(result)
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating custom error responses")
+
+ def validate_restrictions(self, config, restrictions, purge_restrictions=False):
+ try:
+ if restrictions is None:
+ if purge_restrictions:
+ return None
+ else:
+ return config
+ self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
+ geo_restriction = restrictions.get('geo_restriction')
+ self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
+ existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
+ geo_restriction_items = geo_restriction.get('items')
+ if not purge_restrictions:
+ geo_restriction_items.extend([rest for rest in existing_restrictions if
+ rest not in geo_restriction_items])
+ valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
+ valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
+ return {'geo_restriction': valid_restrictions}
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating restrictions")
+
+ def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
+ try:
+ config['default_root_object'] = default_root_object or config.get('default_root_object', '')
+ config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled)
+ if http_version is not None or config.get('http_version'):
+ self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
+ config['http_version'] = http_version or config.get('http_version')
+ if web_acl_id or config.get('web_a_c_l_id'):
+ config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
+ return config
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
+
+ def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
+ try:
+ if config is None:
+ config = {}
+ if aliases is not None:
+ if not purge_aliases:
+ aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
+ if alias not in aliases])
+ config['aliases'] = ansible_list_to_cloudfront_list(aliases)
+ if logging is not None:
+ config['logging'] = self.validate_logging(logging)
+ config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
+ if price_class is not None:
+ self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
+ config['price_class'] = price_class
+ return config
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
+
+ def validate_comment(self, config, comment):
+ config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
+ return config
+
+ def validate_caller_reference(self, caller_reference):
+ return caller_reference or self.__default_datetime_string
+
+ def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
+ try:
+ if valid_origins is not None:
+ valid_origins_list = valid_origins.get('items')
+ if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
+ return str(valid_origins_list[0].get('id'))
+ self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
+
+ def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
+ try:
+ self.validate_is_list(attribute_list, attribute_list_name)
+ if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
+ isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
+ self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
+
+ def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
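+ # e.g. a hypothetical call validate_attribute_with_allowed_values('http3', 'http_version', {'http1.1', 'http2'})
+ # fails the module; None (attribute not supplied) is always accepted.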
+ if attribute is not None and attribute not in allowed_list:
+ self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
+
+ def validate_distribution_from_caller_reference(self, caller_reference):
+ try:
+ distributions = self.__cloudfront_facts_mgr.list_distributions(False)
+ distribution_name = 'Distribution'
+ distribution_config_name = 'DistributionConfig'
+ distribution_ids = [dist.get('Id') for dist in distributions]
+ for distribution_id in distribution_ids:
+ distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ if distribution is not None:
+ distribution_config = distribution[distribution_name].get(distribution_config_name)
+ if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
+ distribution[distribution_name][distribution_config_name] = distribution_config
+ return distribution
+
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
+
+ def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
+ try:
+ if caller_reference is not None:
+ return self.validate_distribution_from_caller_reference(caller_reference)
+ else:
+ if aliases and distribution_id is None:
+ distribution_id = self.validate_distribution_id_from_alias(aliases)
+ if distribution_id:
+ return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ return None
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
+
+ def validate_distribution_id_from_alias(self, aliases):
+ distributions = self.__cloudfront_facts_mgr.list_distributions(False)
+ if distributions:
+ for distribution in distributions:
+ distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
+ if set(aliases) & set(distribution_aliases):
+ return distribution['Id']
+ return None
+
+ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
+ if distribution_id is None:
+ distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Distribution']['Id']
+
+ try:
+ waiter = client.get_waiter('distribution_deployed')
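+ # the distribution_deployed waiter polls roughly once a minute by default,
+ # so the timeout in seconds is converted into a number of attempts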
+ attempts = 1 + int(wait_timeout / 60)
+ waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action."
+ " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)))
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ caller_reference=dict(),
+ comment=dict(),
+ distribution_id=dict(),
+ e_tag=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ alias=dict(),
+ aliases=dict(type='list', default=[], elements='str'),
+ purge_aliases=dict(type='bool', default=False),
+ default_root_object=dict(),
+ origins=dict(type='list', elements='dict'),
+ purge_origins=dict(type='bool', default=False),
+ default_cache_behavior=dict(type='dict'),
+ cache_behaviors=dict(type='list', elements='dict'),
+ purge_cache_behaviors=dict(type='bool', default=False),
+ custom_error_responses=dict(type='list', elements='dict'),
+ purge_custom_error_responses=dict(type='bool', default=False),
+ logging=dict(type='dict'),
+ price_class=dict(),
+ enabled=dict(type='bool'),
+ viewer_certificate=dict(type='dict'),
+ restrictions=dict(type='dict'),
+ web_acl_id=dict(),
+ http_version=dict(),
+ ipv6_enabled=dict(type='bool'),
+ default_origin_domain_name=dict(),
+ default_origin_path=dict(),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1800, type='int')
+ )
+
+ result = {}
+ changed = True
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ mutually_exclusive=[
+ ['distribution_id', 'alias'],
+ ['default_origin_domain_name', 'distribution_id'],
+ ['default_origin_domain_name', 'alias'],
+ ]
+ )
+
+ client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+
+ validation_mgr = CloudFrontValidationManager(module)
+
+ state = module.params.get('state')
+ caller_reference = module.params.get('caller_reference')
+ comment = module.params.get('comment')
+ e_tag = module.params.get('e_tag')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ distribution_id = module.params.get('distribution_id')
+ alias = module.params.get('alias')
+ aliases = module.params.get('aliases')
+ purge_aliases = module.params.get('purge_aliases')
+ default_root_object = module.params.get('default_root_object')
+ origins = module.params.get('origins')
+ purge_origins = module.params.get('purge_origins')
+ default_cache_behavior = module.params.get('default_cache_behavior')
+ cache_behaviors = module.params.get('cache_behaviors')
+ purge_cache_behaviors = module.params.get('purge_cache_behaviors')
+ custom_error_responses = module.params.get('custom_error_responses')
+ purge_custom_error_responses = module.params.get('purge_custom_error_responses')
+ logging = module.params.get('logging')
+ price_class = module.params.get('price_class')
+ enabled = module.params.get('enabled')
+ viewer_certificate = module.params.get('viewer_certificate')
+ restrictions = module.params.get('restrictions')
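+ # Note: purge_restrictions is not declared in argument_spec above, so this
+ # evaluates to None and existing restrictions are preserved by default.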
+ purge_restrictions = module.params.get('purge_restrictions')
+ web_acl_id = module.params.get('web_acl_id')
+ http_version = module.params.get('http_version')
+ ipv6_enabled = module.params.get('ipv6_enabled')
+ default_origin_domain_name = module.params.get('default_origin_domain_name')
+ default_origin_path = module.params.get('default_origin_path')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if alias and alias not in aliases:
+ aliases.append(alias)
+
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+
+ update = state == 'present' and distribution
+ create = state == 'present' and not distribution
+ delete = state == 'absent' and distribution
+
+ if not (update or create or delete):
+ module.exit_json(changed=False)
+
+ if update or delete:
+ config = distribution['Distribution']['DistributionConfig']
+ e_tag = distribution['ETag']
+ distribution_id = distribution['Distribution']['Id']
+ else:
+ config = dict()
+ if update:
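+ # reversible=True splits acronyms per letter (e.g. 'HTTPPort' -> 'h_t_t_p_port')
+ # so that snake_dict_to_camel_dict() below can rebuild the original API key names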
+ config = camel_dict_to_snake_dict(config, reversible=True)
+
+ if create or update:
+ config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
+ config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
+ config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
+ default_origin_path, create, purge_origins)
+ config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
+ cache_behaviors, config['origins'], purge_cache_behaviors)
+ config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
+ default_cache_behavior, config['origins'], True)
+ config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
+ custom_error_responses, purge_custom_error_responses)
+ valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
+ if valid_restrictions:
+ config['restrictions'] = valid_restrictions
+ valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
+ config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
+ config = validation_mgr.validate_comment(config, comment)
+ config = snake_dict_to_camel_dict(config, capitalize_first=True)
+
+ if create:
+ config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
+ result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {}))
+ result = camel_dict_to_snake_dict(result)
+ result['tags'] = list_tags_for_resource(client, module, result['arn'])
+
+ if delete:
+ if config['Enabled']:
+ config['Enabled'] = False
+ result = update_distribution(client, module, config, distribution_id, e_tag)
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+ # e_tag = distribution['ETag']
+ result = delete_distribution(client, module, distribution)
+
+ if update:
+ changed = config != distribution['Distribution']['DistributionConfig']
+ if changed:
+ result = update_distribution(client, module, config, distribution_id, e_tag)
+ else:
+ result = distribution['Distribution']
+ existing_tags = list_tags_for_resource(client, module, result['ARN'])
+ distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
+ changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
+ result = camel_dict_to_snake_dict(result)
+ result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
+ result['diff'] = dict()
+ diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
+ if diff:
+ result['diff']['before'] = diff[0]
+ result['diff']['after'] = diff[1]
+
+ if wait and (create or update):
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+
+ if 'distribution_config' in result:
+ result.update(result['distribution_config'])
+ del result['distribution_config']
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py
new file mode 100644
index 000000000..cb97664fa
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py
@@ -0,0 +1,646 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudfront_distribution_info
+version_added: 1.0.0
+short_description: Obtain facts about an AWS CloudFront distribution
+description:
+ - Gets information about an AWS CloudFront distribution.
+ - Prior to release 5.0.0 this module was called C(community.aws.cloudfront_info).
+ The usage did not change.
+author:
+ - Willem van Ketwich (@wilvk)
+options:
+ distribution_id:
+ description:
+ - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
+ I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
+ required: false
+ type: str
+ invalidation_id:
+ description:
+ - The id of the invalidation to get information about.
+ - Used with I(invalidation).
+ required: false
+ type: str
+ origin_access_identity_id:
+ description:
+ - The id of the CloudFront origin access identity to get information about.
+ required: false
+ type: str
+# web_acl_id:
+# description:
+# - Used with I(list_distributions_by_web_acl_id).
+# required: false
+# type: str
+ domain_name_alias:
+ description:
+ - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
+ distribution to get the distribution id where required.
+ required: false
+ type: str
+ all_lists:
+ description:
+ - Get all CloudFront lists that do not require parameters.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity:
+ description:
+ - Get information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ origin_access_identity_config:
+ description:
+ - Get the configuration information about an origin access identity.
+ - Requires I(origin_access_identity_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution:
+ description:
+ - Get information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ distribution_config:
+ description:
+ - Get the configuration information about a distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ invalidation:
+ description:
+ - Get information about an invalidation.
+ - Requires I(invalidation_id) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution:
+ description:
+ - Get information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ streaming_distribution_config:
+ description:
+ - Get the configuration information about a specified RTMP distribution.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_origin_access_identities:
+ description:
+ - Get a list of CloudFront origin access identities.
+ required: false
+ default: false
+ type: bool
+ list_distributions:
+ description:
+ - Get a list of CloudFront distributions.
+ required: false
+ default: false
+ type: bool
+ list_distributions_by_web_acl_id:
+ description:
+ - Get a list of distributions using web acl id as a filter.
+ - Requires I(web_acl_id) to be set.
+ required: false
+ default: false
+ type: bool
+ list_invalidations:
+ description:
+ - Get a list of invalidations.
+ - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ required: false
+ default: false
+ type: bool
+ list_streaming_distributions:
+ description:
+ - Get a list of streaming distributions.
+ required: false
+ default: false
+ type: bool
+ summary:
+ description:
+ - Returns a summary of all distributions, streaming distributions and origin_access_identities.
+ - This is the default behaviour if no option is selected.
+ required: false
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get a summary of distributions
+ community.aws.cloudfront_distribution_info:
+ summary: true
+ register: result
+
+- name: Get information about a distribution
+ community.aws.cloudfront_distribution_info:
+ distribution: true
+ distribution_id: my-cloudfront-distribution-id
+ register: result_did
+- ansible.builtin.debug:
+ msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- name: Get information about a distribution using the CNAME of the cloudfront distribution.
+ community.aws.cloudfront_distribution_info:
+ distribution: true
+ domain_name_alias: www.my-website.com
+ register: result_website
+- ansible.builtin.debug:
+ msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
+
+- name: Get all information about an invalidation for a distribution.
+ community.aws.cloudfront_distribution_info:
+ invalidation: true
+ distribution_id: my-cloudfront-distribution-id
+ invalidation_id: my-cloudfront-invalidation-id
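+
+# list_invalidations requires I(distribution_id) or I(domain_name_alias); the id below is a placeholder.
+- name: Get a list of invalidations for a distribution
+ community.aws.cloudfront_distribution_info:
+ list_invalidations: true
+ distribution_id: my-cloudfront-distribution-id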
+
+- name: Get all information about a CloudFront origin access identity.
+ community.aws.cloudfront_distribution_info:
+ origin_access_identity: true
+ origin_access_identity_id: my-cloudfront-origin-access-identity-id
+
+- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
+ community.aws.cloudfront_distribution_info:
+ all_lists: true
+'''
+
+RETURN = '''
+origin_access_identity:
+ description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity) is true
+ type: dict
+origin_access_identity_configuration:
+ description: Describes the origin access identity configuration information. Requires I(origin_access_identity_id) to be set.
+ returned: only if I(origin_access_identity_config) is true
+ type: dict
+distribution:
+ description: >
+ Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution) is true
+ type: dict
+distribution_config:
+ description: >
+ Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
+ to be specified.
+ returned: only if I(distribution_config) is true
+ type: dict
+invalidation:
+ description: >
+ Describes the invalidation information for the distribution. Requires
+ I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias).
+ returned: only if I(invalidation) is true
+ type: dict
+streaming_distribution:
+ description: >
+ Describes the streaming information for the distribution. Requires
+ I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution) is true
+ type: dict
+streaming_distribution_config:
+ description: >
+ Describes the streaming configuration information for the distribution.
+ Requires I(distribution_id) or I(domain_name_alias) to be specified.
+ returned: only if I(streaming_distribution_config) is true
+ type: dict
+summary:
+ description: Gives a summary of distributions, streaming distributions and origin access identities.
+ returned: as default or if I(summary) is true
+ type: dict
+result:
+ description: >
+ Result dict not nested under the CloudFront distribution ID, allowing the module's results to be accessed
+ without knowing that ID; discovering the DistributionId is usually the reason one uses this module in the first place.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class CloudFrontServiceManager:
+ """Handles CloudFront Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def get_distribution(self, distribution_id):
+ try:
+ distribution = self.client.get_distribution(aws_retry=True, Id=distribution_id)
+ return distribution
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution")
+
+ def get_distribution_config(self, distribution_id):
+ try:
+ distribution = self.client.get_distribution_config(aws_retry=True, Id=distribution_id)
+ return distribution
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution configuration")
+
+ def get_origin_access_identity(self, origin_access_identity_id):
+ try:
+ origin_access_identity = self.client.get_cloud_front_origin_access_identity(aws_retry=True, Id=origin_access_identity_id)
+ return origin_access_identity
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity")
+
+ def get_origin_access_identity_config(self, origin_access_identity_id):
+ try:
+ origin_access_identity = self.client.get_cloud_front_origin_access_identity_config(aws_retry=True, Id=origin_access_identity_id)
+ return origin_access_identity
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
+
+ def get_invalidation(self, distribution_id, invalidation_id):
+ try:
+ invalidation = self.client.get_invalidation(aws_retry=True, DistributionId=distribution_id, Id=invalidation_id)
+ return invalidation
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing invalidation")
+
+ def get_streaming_distribution(self, distribution_id):
+ try:
+ streaming_distribution = self.client.get_streaming_distribution(aws_retry=True, Id=distribution_id)
+ return streaming_distribution
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ def get_streaming_distribution_config(self, distribution_id):
+ try:
+ streaming_distribution = self.client.get_streaming_distribution_config(aws_retry=True, Id=distribution_id)
+ return streaming_distribution
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ # Split out paginator to allow for the backoff decorator to function
+ @AWSRetry.jittered_backoff()
+ def _paginated_result(self, paginator_name, **params):
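+ # the decorator wraps the whole paginate/build_full_result call, so a
+ # throttled page retries the complete listing rather than a single page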
+ paginator = self.client.get_paginator(paginator_name)
+ results = paginator.paginate(**params).build_full_result()
+ return results
+
+ def list_origin_access_identities(self):
+ try:
+ results = self._paginated_result('list_cloud_front_origin_access_identities')
+ origin_access_identity_list = results.get('CloudFrontOriginAccessIdentityList', {'Items': []})
+
+ if len(origin_access_identity_list['Items']) > 0:
+ return origin_access_identity_list['Items']
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
+
+ def list_distributions(self, keyed=True):
+ try:
+ results = self._paginated_result('list_distributions')
+ distribution_list = results.get('DistributionList', {'Items': []})
+
+ if len(distribution_list['Items']) > 0:
+ distribution_list = distribution_list['Items']
+ else:
+ return {}
+
+ if not keyed:
+ return distribution_list
+ return self.keyed_list_helper(distribution_list)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions")
+
+ def list_distributions_by_web_acl_id(self, web_acl_id):
+ try:
+ # ListDistributionsByWebACLId has no boto3 paginator, so call the client method directly
+ results = self.client.list_distributions_by_web_acl_id(aws_retry=True, WebACLId=web_acl_id)
+ distribution_list = results.get('DistributionList', {'Items': []})
+
+ if len(distribution_list['Items']) > 0:
+ distribution_list = distribution_list['Items']
+ else:
+ return {}
+ return self.keyed_list_helper(distribution_list)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
+
+ def list_invalidations(self, distribution_id):
+ try:
+ results = self._paginated_result('list_invalidations', DistributionId=distribution_id)
+ invalidation_list = results.get('InvalidationList', {'Items': []})
+
+ if len(invalidation_list['Items']) > 0:
+ return invalidation_list['Items']
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing invalidations")
+
+ def list_streaming_distributions(self, keyed=True):
+ try:
+ results = self._paginated_result('list_streaming_distributions')
+ streaming_distribution_list = results.get('StreamingDistributionList', {'Items': []})
+
+ if len(streaming_distribution_list['Items']) > 0:
+ streaming_distribution_list = streaming_distribution_list['Items']
+ else:
+ return {}
+
+ if not keyed:
+ return streaming_distribution_list
+ return self.keyed_list_helper(streaming_distribution_list)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing streaming distributions")
+
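+ # The summary dict combines three keys: 'distributions', 'streaming_distributions'
+ # and 'origin_access_identities', each holding a list of per-resource summaries.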
+ def summary(self):
+ summary_dict = {}
+ summary_dict.update(self.summary_get_distribution_list(False))
+ summary_dict.update(self.summary_get_distribution_list(True))
+ summary_dict.update(self.summary_get_origin_access_identity_list())
+ return summary_dict
+
+ def summary_get_origin_access_identity_list(self):
+ try:
+ origin_access_identity_list = {'origin_access_identities': []}
+ origin_access_identities = self.list_origin_access_identities()
+ for origin_access_identity in origin_access_identities:
+ oai_id = origin_access_identity['Id']
+ oai_full_response = self.get_origin_access_identity(oai_id)
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
+ return origin_access_identity_list
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
+
+ def summary_get_distribution_list(self, streaming=False):
+ try:
+ list_name = 'streaming_distributions' if streaming else 'distributions'
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
+ distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ for dist in distributions:
+ temp_distribution = {}
+ for key_name in key_list:
+ temp_distribution[key_name] = dist[key_name]
+ temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', []))
+ temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ if not streaming:
+ temp_distribution['WebACLId'] = dist['WebACLId']
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ if invalidation_ids:
+ temp_distribution['Invalidations'] = invalidation_ids
+ resource_tags = self.client.list_tags_for_resource(aws_retry=True, Resource=dist['ARN'])
+ temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ distribution_list[list_name].append(temp_distribution)
+ return distribution_list
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of distributions")
+ except Exception as e:
+ self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
+ exception=traceback.format_exc())
+
+ def get_etag_from_distribution_id(self, distribution_id, streaming):
+ distribution = {}
+ if not streaming:
+ distribution = self.get_distribution(distribution_id)
+ else:
+ distribution = self.get_streaming_distribution(distribution_id)
+ return distribution['ETag']
+
+ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
+ try:
+ invalidation_ids = []
+ invalidations = self.list_invalidations(distribution_id)
+ for invalidation in invalidations:
+ invalidation_ids.append(invalidation['Id'])
+ return invalidation_ids
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
+
+ def get_distribution_id_from_domain_name(self, domain_name):
+ try:
+ distribution_id = ""
+ distributions = self.list_distributions(False)
+ distributions += self.list_streaming_distributions(False)
+ for dist in distributions:
+ if 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ if str(alias).lower() == domain_name.lower():
+ distribution_id = dist['Id']
+ break
+ return distribution_id
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
+
+ def get_aliases_from_distribution_id(self, distribution_id):
+ aliases = []
+ try:
+ distributions = self.list_distributions(False)
+ for dist in distributions:
+ if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ aliases.append(alias)
+ break
+ return aliases
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
+
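+ # Example shape (hypothetical values): for an item with Id 'E123' and alias
+ # 'cdn.example.com', the keyed dict is {'E123': item, 'cdn.example.com': item},
+ # so the same entry is reachable by either id or alias.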
+ def keyed_list_helper(self, list_to_key):
+ keyed_list = dict()
+ for item in list_to_key:
+ distribution_id = item['Id']
+ if 'Items' in item['Aliases']:
+ aliases = item['Aliases']['Items']
+ for alias in aliases:
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
+ return keyed_list
+
+
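+# Example (hypothetical values): with distribution_id 'E123' and aliases
+# ['cdn.example.com'], the details are mirrored under facts['E123'],
+# facts['cdn.example.com'] and facts['result'].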
+def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
+ facts[distribution_id].update(details)
+ # also have a fixed key for accessing results/details returned
+ facts['result'] = details
+ facts['result']['DistributionId'] = distribution_id
+
+ for alias in aliases:
+ facts[alias].update(details)
+ return facts
+
+
+def main():
+ argument_spec = dict(
+ distribution_id=dict(required=False, type='str'),
+ invalidation_id=dict(required=False, type='str'),
+ origin_access_identity_id=dict(required=False, type='str'),
+ domain_name_alias=dict(required=False, type='str'),
+ web_acl_id=dict(required=False, type='str'),
+ all_lists=dict(required=False, default=False, type='bool'),
+ distribution=dict(required=False, default=False, type='bool'),
+ distribution_config=dict(required=False, default=False, type='bool'),
+ origin_access_identity=dict(required=False, default=False, type='bool'),
+ origin_access_identity_config=dict(required=False, default=False, type='bool'),
+ invalidation=dict(required=False, default=False, type='bool'),
+ streaming_distribution=dict(required=False, default=False, type='bool'),
+ streaming_distribution_config=dict(required=False, default=False, type='bool'),
+ list_origin_access_identities=dict(required=False, default=False, type='bool'),
+ list_distributions=dict(required=False, default=False, type='bool'),
+ list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
+ list_invalidations=dict(required=False, default=False, type='bool'),
+ list_streaming_distributions=dict(required=False, default=False, type='bool'),
+ summary=dict(required=False, default=False, type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ service_mgr = CloudFrontServiceManager(module)
+
+ distribution_id = module.params.get('distribution_id')
+ invalidation_id = module.params.get('invalidation_id')
+ origin_access_identity_id = module.params.get('origin_access_identity_id')
+ web_acl_id = module.params.get('web_acl_id')
+ domain_name_alias = module.params.get('domain_name_alias')
+ all_lists = module.params.get('all_lists')
+ distribution = module.params.get('distribution')
+ distribution_config = module.params.get('distribution_config')
+ origin_access_identity = module.params.get('origin_access_identity')
+ origin_access_identity_config = module.params.get('origin_access_identity_config')
+ invalidation = module.params.get('invalidation')
+ streaming_distribution = module.params.get('streaming_distribution')
+ streaming_distribution_config = module.params.get('streaming_distribution_config')
+ list_origin_access_identities = module.params.get('list_origin_access_identities')
+ list_distributions = module.params.get('list_distributions')
+ list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
+ list_invalidations = module.params.get('list_invalidations')
+ list_streaming_distributions = module.params.get('list_streaming_distributions')
+ summary = module.params.get('summary')
+
+ aliases = []
+ result = {'cloudfront': {}}
+ facts = {}
+
+ require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
+ streaming_distribution_config or list_invalidations)
+
+ # set default to summary if no option specified
+ summary = summary or not (distribution or distribution_config or origin_access_identity or
+ origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
+ list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
+ list_streaming_distributions or list_distributions)
+
+ # validations
+ if require_distribution_id and distribution_id is None and domain_name_alias is None:
+ module.fail_json(msg='Error: distribution_id or domain_name_alias must be specified.')
+ if invalidation and invalidation_id is None:
+ module.fail_json(msg='Error: invalidation_id must be specified when invalidation is true.')
+ if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
+ module.fail_json(msg='Error: origin_access_identity_id must be specified.')
+ if list_distributions_by_web_acl_id and web_acl_id is None:
+ module.fail_json(msg='Error: web_acl_id must be specified when list_distributions_by_web_acl_id is true.')
+
+ # get distribution id from domain name alias
+ if require_distribution_id and distribution_id is None:
+ distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
+ if not distribution_id:
+ module.fail_json(msg='Error: unable to determine a distribution id from domain_name_alias')
+
+ # set appropriate cloudfront id
+ if distribution_id and not list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ if invalidation_id:
+ facts.update({invalidation_id: {}})
+ elif distribution_id and list_invalidations:
+ facts = {distribution_id: {}}
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+ for alias in aliases:
+ facts.update({alias: {}})
+ elif origin_access_identity_id:
+ facts = {origin_access_identity_id: {}}
+ elif web_acl_id:
+ facts = {web_acl_id: {}}
+
+ # get details based on options
+ if distribution:
+ facts_to_set = service_mgr.get_distribution(distribution_id)
+ if distribution_config:
+ facts_to_set = service_mgr.get_distribution_config(distribution_id)
+ if origin_access_identity:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
+ if origin_access_identity_config:
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
+ if invalidation:
+ facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
+ facts[invalidation_id].update(facts_to_set)
+ if streaming_distribution:
+ facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
+ if streaming_distribution_config:
+ facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
+ if list_invalidations:
+ facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
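+ # 'facts_to_set' is only bound if one of the distribution-scoped options above
+ # ran, so checking vars() doubles as a "was anything fetched" test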
+ if 'facts_to_set' in vars():
+ facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
+
+ # get list based on options
+ if all_lists or list_origin_access_identities:
+ facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
+ if all_lists or list_distributions:
+ facts['distributions'] = service_mgr.list_distributions()
+ if all_lists or list_streaming_distributions:
+ facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
+ if list_distributions_by_web_acl_id:
+ facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
+ if list_invalidations:
+ facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
+
+ # default summary option
+ if summary:
+ facts['summary'] = service_mgr.summary()
+
+ result['changed'] = False
+ result['cloudfront'].update(facts)
+
+ module.exit_json(msg="Retrieved CloudFront info.", **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
new file mode 100644
index 000000000..767a1d181
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+
+version_added: 1.0.0
+module: cloudfront_invalidation
+
+short_description: Create invalidations for AWS CloudFront distributions
+description:
+ - Allows for invalidation of a batch of paths for a CloudFront distribution.
+
+author: Willem van Ketwich (@wilvk)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+
+options:
+ distribution_id:
+ description:
+ - The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
+ required: false
+ type: str
+ alias:
+ description:
+ - The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
+ required: false
+ type: str
+ caller_reference:
+ description:
+ - A unique reference identifier for the invalidation paths.
+ - Defaults to current datetime stamp.
+ required: false
+ type: str
+ target_paths:
+ description:
+ - A list of paths on the distribution to invalidate. Each path should begin with C(/). Wildcards are allowed, for example C(/foo/bar/*).
+ required: true
+ type: list
+ elements: str
+
+notes:
+ - Does not support check mode.
+
+'''
+
+EXAMPLES = r'''
+
+- name: create a batch of invalidations using a distribution_id for a reference
+ community.aws.cloudfront_invalidation:
+ distribution_id: E15BU8SDCGSG57
+ caller_reference: testing 123
+ target_paths:
+ - /testpathone/test1.css
+ - /testpathtwo/test2.js
+ - /testpaththree/test3.ss
+
+- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
+ community.aws.cloudfront_invalidation:
+ alias: alias.test.com
+ caller_reference: testing 123
+ target_paths:
+ - /testpathone/test4.css
+ - /testpathtwo/test5.js
+ - /testpaththree/*
+
+'''
+
+RETURN = r'''
+invalidation:
+ description: The invalidation's information.
+ returned: always
+ type: complex
+ contains:
+ create_time:
+ description: The date and time the invalidation request was first made.
+ returned: always
+ type: str
+ sample: '2018-02-01T15:50:41.159000+00:00'
+ id:
+ description: The identifier for the invalidation request.
+ returned: always
+ type: str
+ sample: I2G9MOWJZFV612
+ invalidation_batch:
+ description: The current invalidation information for the batch request.
+ returned: always
+ type: complex
+ contains:
+ caller_reference:
+ description: The value used to uniquely identify an invalidation request.
+ returned: always
+ type: str
+ sample: testing 123
+ paths:
+ description: A dict that contains information about the objects that you want to invalidate.
+ returned: always
+ type: complex
+ contains:
+ items:
+ description: A list of the paths that you want to invalidate.
+ returned: always
+ type: list
+ sample:
+ - /testpathtwo/test2.js
+ - /testpathone/test1.css
+ - /testpaththree/test3.ss
+ quantity:
+ description: The number of objects that you want to invalidate.
+ returned: always
+ type: int
+ sample: 3
+ status:
+ description: The status of the invalidation request.
+ returned: always
+ type: str
+ sample: Completed
+location:
+ description: The fully qualified URI of the distribution and invalidation batch request.
+ returned: always
+ type: str
+ sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
+'''
+
+import datetime
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+
+
+class CloudFrontInvalidationServiceManager(object):
+ """
+ Handles CloudFront service calls to AWS for invalidations
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront')
+
+ def create_invalidation(self, distribution_id, invalidation_batch):
+ current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
+ try:
+ response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
+ response.pop('ResponseMetadata', None)
+ if current_invalidation_response:
+ return response, False
+ else:
+ return response, True
+ except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation '
+ 'batch for the same distribution.'):
+ self.module.warn("InvalidationBatch target paths are not modifiable. "
+ "To make a new invalidation please update caller_reference.")
+ return current_invalidation_response, False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
+
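+ # Matching on CallerReference below is what makes repeat runs idempotent:
+ # create_invalidation() above reports changed=False when a batch with the
+ # same caller reference already exists.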
+ def get_invalidation(self, distribution_id, caller_reference):
+ current_invalidation = {}
+ # find all invalidations for the distribution
+ try:
+ paginator = self.client.get_paginator('list_invalidations')
+ invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
+ invalidation_ids = [inv['Id'] for inv in invalidations]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
+
+ # check if there is an invalidation with the same caller reference
+ for inv_id in invalidation_ids:
+ try:
+ invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
+ caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
+ if caller_ref == caller_reference:
+ current_invalidation = invalidation
+ break
+
+ current_invalidation.pop('ResponseMetadata', None)
+ return current_invalidation
+
+
+class CloudFrontInvalidationValidationManager(object):
+ """
+ Manages CloudFront validations for invalidation batches
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+
+ def validate_distribution_id(self, distribution_id, alias):
+ try:
+ if distribution_id is None and alias is None:
+ self.module.fail_json(msg="distribution_id or alias must be specified")
+ if distribution_id is None:
+ distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
+ return distribution_id
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating parameters.")
+
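+ # Example (illustrative paths): create_aws_list(['/a', '/b/*']) returns
+ # {'Quantity': 2, 'Items': ['/a', '/b/*']}, the list shape the CloudFront API expects.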
+ def create_aws_list(self, invalidation_batch):
+ aws_list = {}
+ aws_list["Quantity"] = len(invalidation_batch)
+ aws_list["Items"] = invalidation_batch
+ return aws_list
+
+ def validate_invalidation_batch(self, invalidation_batch, caller_reference):
+ try:
+ if caller_reference is not None:
+ valid_caller_reference = caller_reference
+ else:
+ valid_caller_reference = datetime.datetime.now().isoformat()
+ valid_invalidation_batch = {
+ 'paths': self.create_aws_list(invalidation_batch),
+ 'caller_reference': valid_caller_reference
+ }
+ return valid_invalidation_batch
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
+
+
+def main():
+ argument_spec = dict(
+ caller_reference=dict(),
+ distribution_id=dict(),
+ alias=dict(),
+ target_paths=dict(required=True, type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
+
+ validation_mgr = CloudFrontInvalidationValidationManager(module)
+ service_mgr = CloudFrontInvalidationServiceManager(module)
+
+ caller_reference = module.params.get('caller_reference')
+ distribution_id = module.params.get('distribution_id')
+ alias = module.params.get('alias')
+ target_paths = module.params.get('target_paths')
+
+ result = {}
+
+ distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
+ valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
+ valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
+ result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
new file mode 100644
index 000000000..c6879d0c5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+
+version_added: 1.0.0
+module: cloudfront_origin_access_identity
+
+short_description: Create, update and delete origin access identities for a
+ CloudFront distribution
+
+description:
+ - Allows for easy creation, updating and deletion of origin access
+ identities.
+
+author: Willem van Ketwich (@wilvk)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+
+options:
+ state:
+ description: Whether the named resource should exist.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ origin_access_identity_id:
+ description:
+ - The origin_access_identity_id of the CloudFront distribution.
+ required: false
+ type: str
+ comment:
+ description:
+ - A comment to describe the CloudFront origin access identity.
+ required: false
+ type: str
+ caller_reference:
+ description:
+ - A unique identifier to reference the origin access identity by.
+ required: false
+ type: str
+
+notes:
+ - Does not support check mode.
+
+'''
+
+EXAMPLES = '''
+
+- name: create an origin access identity
+ community.aws.cloudfront_origin_access_identity:
+ state: present
+ caller_reference: this is an example reference
+ comment: this is an example comment
+
+- name: update an existing origin access identity using caller_reference as an identifier
+ community.aws.cloudfront_origin_access_identity:
+ origin_access_identity_id: E17DRN9XUOAHZX
+ caller_reference: this is an example reference
+ comment: this is a new comment
+
+- name: delete an existing origin access identity using caller_reference as an identifier
+ community.aws.cloudfront_origin_access_identity:
+ state: absent
+ caller_reference: this is an example reference
+ comment: this is a new comment
+
+'''
+
+RETURN = '''
+cloud_front_origin_access_identity:
+ description: The origin access identity's information.
+ returned: always
+ type: complex
+ contains:
+ cloud_front_origin_access_identity_config:
+ description: Describes a URL specifying the origin access identity.
+ returned: always
+ type: complex
+ contains:
+ caller_reference:
+ description: A caller reference for the OAI.
+ returned: always
+ type: str
+ comment:
+ description: A comment describing the OAI.
+ returned: always
+ type: str
+ id:
+ description: A unique identifier of the OAI.
+ returned: always
+ type: str
+ s3_canonical_user_id:
+ description: The canonical user ID of the user who created the OAI.
+ returned: always
+ type: str
+e_tag:
+ description: The current version of the origin access identity created.
+ returned: always
+ type: str
+location:
+ description: The fully qualified URI of the new origin access identity just created.
+ returned: when initially created
+ type: str
+
+'''
+
+import datetime
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class CloudFrontOriginAccessIdentityServiceManager(object):
+ """
+ Handles CloudFront origin access identity service calls to aws
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront')
+
+ def create_origin_access_identity(self, caller_reference, comment):
+ try:
+ return self.client.create_cloud_front_origin_access_identity(
+ CloudFrontOriginAccessIdentityConfig={
+ 'CallerReference': caller_reference,
+ 'Comment': comment
+ }
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.")
+
+ def delete_origin_access_identity(self, origin_access_identity_id, e_tag):
+ try:
+ return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
+
+ def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag):
+ changed = False
+ new_config = {
+ 'CallerReference': caller_reference,
+ 'Comment': comment
+ }
+
+ try:
+ current_config = self.client.get_cloud_front_origin_access_identity_config(
+ Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig']
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.")
+
+ if new_config != current_config:
+ changed = True
+
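+ # The update call below is made unconditionally; 'changed' only reflects
+ # whether the desired config differs from what CloudFront currently stores.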
+ try:
+ # If the CallerReference is a value already sent in a previous identity request
+ # the returned value is that of the original request
+ result = self.client.update_cloud_front_origin_access_identity(
+ CloudFrontOriginAccessIdentityConfig=new_config,
+ Id=origin_access_identity_id,
+ IfMatch=e_tag,
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
+
+ return result, changed
+
+
+class CloudFrontOriginAccessIdentityValidationManager(object):
+ """
+ Manages CloudFront Origin Access Identities
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+
+ def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id):
+ try:
+ if origin_access_identity_id is None:
+ return
+ oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id)
+ if oai is not None:
+ return oai.get('ETag')
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")
+
+ def validate_origin_access_identity_id_from_caller_reference(self, caller_reference):
+ try:
+ origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
+ origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities]
+ for origin_access_identity_id in origin_access_identity_ids:
+ oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id)
+ temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
+ if temp_caller_reference == caller_reference:
+ return origin_access_identity_id
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")
+
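+ # Example (illustrative timestamp): with comment=None this produces something like
+ # "origin access identity created by Ansible with datetime 2017-01-01T00:00:00.000000".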
+ def validate_comment(self, comment):
+ if comment is None:
+ return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ return comment
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ origin_access_identity_id=dict(),
+ caller_reference=dict(),
+ comment=dict(),
+ )
+
+ result = {}
+ e_tag = None
+ changed = False
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+ service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
+ validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)
+
+ state = module.params.get('state')
+ caller_reference = module.params.get('caller_reference')
+
+ comment = module.params.get('comment')
+ origin_access_identity_id = module.params.get('origin_access_identity_id')
+
+ if origin_access_identity_id is None and caller_reference is not None:
+ origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)
+
+ e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
+ comment = validation_mgr.validate_comment(comment)
+
+ if state == 'present':
+ if origin_access_identity_id is not None and e_tag is not None:
+ result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
+ else:
+ result = service_mgr.create_origin_access_identity(caller_reference, comment)
+ changed = True
+ elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
+ result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
+ changed = True
+
+ result.pop('ResponseMetadata', None)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py
new file mode 100644
index 000000000..01b38a3bd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+version_added: 3.2.0
+module: cloudfront_response_headers_policy
+
+short_description: Create, update and delete response headers policies to be used in a CloudFront distribution
+
+description:
+ - Create, update and delete response headers policies to be used in a CloudFront distribution for inserting custom headers.
+ - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy).
+
+author: Stefan Horning (@stefanhorning)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+
+options:
+ state:
+ description: Whether the named policy should be present or absent.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ name:
+ description: Name of the policy
+ required: true
+ type: str
+ comment:
+ description: Description of the policy
+ required: false
+ type: str
+ cors_config:
+ description: CORS header config block
+ required: false
+ default: {}
+ type: dict
+ security_headers_config:
+ description: Security headers config block, for headers such as XSS-Protection, Content-Security-Policy or Strict-Transport-Security.
+ required: false
+ default: {}
+ type: dict
+ custom_headers_config:
+ description: Custom headers config block. Define your own list of headers and values.
+ required: false
+ default: {}
+ type: dict
+
+'''
+
+EXAMPLES = '''
+- name: Creating a CloudFront header policy using all predefined header features and a custom header for demonstration
+ community.aws.cloudfront_response_headers_policy:
+ name: my-header-policy
+ comment: My header policy for all the headers
+ cors_config:
+ access_control_allow_origins:
+ items:
+ - 'https://foo.com/bar'
+ - 'https://bar.com/foo'
+ access_control_allow_headers:
+ items:
+ - 'X-Session-Id'
+ access_control_allow_methods:
+ items:
+ - GET
+ - OPTIONS
+ - HEAD
+ access_control_allow_credentials: true
+ access_control_expose_headers:
+ items:
+ - 'X-Session-Id'
+ access_control_max_age_sec: 1800
+ origin_override: true
+ security_headers_config:
+ xss_protection:
+ protection: true
+ report_uri: 'https://my.report-uri.com/foo/bar'
+ override: true
+ frame_options:
+ frame_option: 'SAMEORIGIN'
+ override: true
+ referrer_policy:
+ referrer_policy: 'same-origin'
+ override: true
+ content_security_policy:
+ content_security_policy: "frame-ancestors 'none'; report-uri https://my.report-uri.com/r/d/csp/enforce;"
+ override: true
+ content_type_options:
+ override: true
+ strict_transport_security:
+ include_subdomains: true
+ preload: true
+ access_control_max_age_sec: 63072000
+ override: true
+ custom_headers_config:
+ items:
+ - { header: 'X-Test-Header', value: 'Foo', override: true }
+ state: present
+
+- name: Delete header policy
+ community.aws.cloudfront_response_headers_policy:
+ name: my-header-policy
+ state: absent
+'''
+
+RETURN = '''
+response_headers_policy:
+ description: The policy's information
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: ID of the policy
+ returned: always
+ type: str
+ sample: '10a45b52-630e-4b7c-77c6-205f06df0462'
+ last_modified_time:
+ description: Timestamp of last modification of policy
+ returned: always
+ type: str
+ sample: '2022-02-04T13:23:27.304000+00:00'
+ response_headers_policy_config:
+ description: The response headers config dict containing all the headers configured
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: Name of the policy
+ type: str
+ returned: always
+ sample: my-header-policy
+'''
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+import datetime
+
+
+class CloudfrontResponseHeadersPolicyService(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront')
+ self.check_mode = module.check_mode
+
+ def find_response_headers_policy(self, name):
+ try:
+ policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items']
+
+ for policy in policies:
+ if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name:
+ # the list_ request does not contain the ETag (which we need), so do another get_ request here
+ matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id'])
+ break
+ else:
+ matching_policy = None
+
+ return matching_policy
+ except (ParamValidationError, ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error fetching policy information")
+
+ def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config):
+ cors_config = snake_dict_to_camel_dict(cors_config, capitalize_first=True)
+ security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True)
+
+ # Little helper for turning xss_protection into XSSProtection and not into XssProtection
+ if 'XssProtection' in security_headers_config:
+ security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection')
+
+ custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True)
+
+ config = {
+ 'Name': name,
+ 'Comment': comment,
+ 'CorsConfig': self.insert_quantities(cors_config),
+ 'SecurityHeadersConfig': security_headers_config,
+ 'CustomHeadersConfig': self.insert_quantities(custom_headers_config)
+ }
+
+ config = {k: v for k, v in config.items() if v}
+
+ matching_policy = self.find_response_headers_policy(name)
+
+ changed = False
+
+ if self.check_mode:
+ self.module.exit_json(changed=True, response_headers_policy=camel_dict_to_snake_dict(config))
+
+ if matching_policy is None:
+ try:
+ result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config)
+ changed = True
+ except (ParamValidationError, ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error creating policy")
+ else:
+ policy_id = matching_policy['ResponseHeadersPolicy']['Id']
+ etag = matching_policy['ETag']
+ try:
+ result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config)
+
+ changed_time = result['ResponseHeadersPolicy']['LastModifiedTime']
+ seconds = 3 # threshold for returned timestamp age
+ seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds))
+
+ # consider change made by this execution of the module if returned timestamp was very recent
+ if changed_time > seconds_ago:
+ changed = True
+ except (ParamValidationError, ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Updating creating policy")
+
+ self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+ def delete_response_header_policy(self, name):
+ matching_policy = self.find_response_headers_policy(name)
+
+ if matching_policy is None:
+ self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting")
+ else:
+ policy_id = matching_policy['ResponseHeadersPolicy']['Id']
+ etag = matching_policy['ETag']
+ if self.check_mode:
+ result = {}
+ else:
+ try:
+ result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag)
+ except (ParamValidationError, ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error deleting policy")
+
+ self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
+
+ # Inserts a Quantity field into dicts with a list ('Items')
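+ # Example (illustrative input): {'Items': ['a'], 'Nested': {'Items': ['b', 'c']}}
+ # becomes {'Items': ['a'], 'Quantity': 1, 'Nested': {'Items': ['b', 'c'], 'Quantity': 2}}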
+ @staticmethod
+ def insert_quantities(dict_with_items):
+ # Items on top level case
+ if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list):
+ dict_with_items['Quantity'] = len(dict_with_items['Items'])
+
+ # Items on second level case
+ for k, v in dict_with_items.items():
+ if isinstance(v, dict) and 'Items' in v:
+ v['Quantity'] = len(v['Items'])
+
+ return dict_with_items
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ comment=dict(type='str'),
+ cors_config=dict(type='dict', default=dict()),
+ security_headers_config=dict(type='dict', default=dict()),
+ custom_headers_config=dict(type='dict', default=dict()),
+ state=dict(choices=['present', 'absent'], type='str', default='present'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ name = module.params.get('name')
+ comment = module.params.get('comment', '')
+ cors_config = module.params.get('cors_config')
+ security_headers_config = module.params.get('security_headers_config')
+ custom_headers_config = module.params.get('custom_headers_config')
+ state = module.params.get('state')
+
+ service = CloudfrontResponseHeadersPolicyService(module)
+
+ if state == 'absent':
+ service.delete_response_header_policy(name)
+ else:
+ service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/codebuild_project.py b/ansible_collections/community/aws/plugins/modules/codebuild_project.py
new file mode 100644
index 000000000..873b74010
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/codebuild_project.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: codebuild_project
+version_added: 1.0.0
+short_description: Create or delete an AWS CodeBuild project
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+description:
+ - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild).
+ The usage did not change.
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+options:
+ name:
+ description:
+ - Name of the CodeBuild project.
+ required: true
+ type: str
+ description:
+ description:
+ - Descriptive text of the CodeBuild project.
+ type: str
+ source:
+ description:
+ - Configure service and location for the build input source.
+ - I(source) is required when creating a new project.
+ suboptions:
+ type:
+ description:
+ - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
+ required: true
+ type: str
+ location:
+ description:
+ - Information about the location of the source code to be built. For I(type) C(CODEPIPELINE) location should not be specified.
+ type: str
+ git_clone_depth:
+ description:
+ - When using git you can specify the clone depth as an integer here.
+ type: int
+ buildspec:
+ description:
+ - The build spec declaration to use for the builds in this build project. Leave empty if part of the CodeBuild project.
+ type: str
+ insecure_ssl:
+ description:
+ - Enable this flag to ignore SSL warnings while connecting to the project source code.
+ type: bool
+ type: dict
+ artifacts:
+ description:
+ - Information about the build output artifacts for the build project.
+ - I(artifacts) is required when creating a new project.
+ suboptions:
+ type:
+ description:
+ - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
+ required: true
+ location:
+ description:
+ - Information about the build output artifact location. When choosing I(type) C(S3), set the bucket name here.
+ path:
+ description:
+ - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
+ - Used for path in S3 bucket when type is C(S3).
+ namespace_type:
+ description:
+ - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
+ - Accepts C(BUILD_ID) and C(NONE).
+ - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
+ name:
+ description:
+ - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
+ packaging:
+ description:
+ - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
+ type: dict
+ cache:
+ description:
+ - Caching params to speed up following builds.
+ suboptions:
+ type:
+ description:
+ - Cache type. Can be C(NO_CACHE) or C(S3).
+ required: true
+ location:
+ description:
+ - Caching location on S3.
+ required: true
+ type: dict
+ environment:
+ description:
+ - Information about the build environment for the build project.
+ suboptions:
+ type:
+ description:
+ - The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
+ required: true
+ image:
+ description:
+ - The ID of the Docker image to use for this build project.
+ required: true
+ compute_type:
+ description:
+ - Information about the compute resources the build project will use.
+ - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
+ required: true
+ environment_variables:
+ description:
+ - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
+ - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
+ privileged_mode:
+ description:
+ - Enables running the Docker daemon inside a Docker container.
+ - Set to C(true) only if the build project is to be used to build Docker images.
+ type: dict
+ service_role:
+ description:
+ - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ type: str
+ timeout_in_minutes:
+ description:
+ - How long CodeBuild should wait until timing out any build that has not been marked as completed.
+ default: 60
+ type: int
+ encryption_key:
+ description:
+ - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
+ type: str
+ tags:
+ description:
+ - A set of tags for the build project.
+ - Mutually exclusive with the I(resource_tags) parameter.
+ - In release 6.0.0 this parameter will accept a simple dictionary
+ instead of the list of dictionaries format. To use the simple
+ dictionary format prior to release 6.0.0 the I(resource_tags) can
+ be used instead of I(tags).
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The name of the Tag.
+ type: str
+ value:
+ description: The value of the Tag.
+ type: str
+ vpc_config:
+ description:
+ - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
+ type: dict
+ state:
+ description:
+ - Create or remove CodeBuild project.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ resource_tags:
+ description:
+ - A dictionary representing the tags to be applied to the build project.
+ - If the I(resource_tags) parameter is not set then tags will not be modified.
+ - Mutually exclusive with the I(tags) parameter.
+ type: dict
+ required: false
+ purge_tags:
+ description:
+ - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
+ from the resource to match exactly what is defined by I(tags) parameter.
+ - If the I(resource_tags) parameter is not set then tags will not be modified, even
+ if I(purge_tags=True).
+ - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
+ modified. As such they will be ignored for the purposes of the
+ I(purge_tags) parameter. See the Amazon documentation for more information
+ U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
+ type: bool
+ default: true
+ required: false
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.codebuild_project:
+ name: my_project
+ description: My nice little project
+ service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
+ source:
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
+ environment:
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - { name: 'PROFILE', value: 'staging' }
+ encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
+ region: us-east-1
+ state: present
+'''
+
+RETURN = r'''
+project:
+ description: Returns the dictionary describing the code project configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodeBuild project.
+ returned: always
+ type: str
+ sample: my_project
+ arn:
+ description: ARN of the CodeBuild project.
+ returned: always
+ type: str
+ sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
+ description:
+ description: A description of the CodeBuild project.
+ returned: always
+ type: str
+ sample: My nice little project
+ source:
+ description: Information about the build input source code.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the repository.
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Location identifier, depending on the source type.
+ returned: when configured
+ type: str
+ git_clone_depth:
+ description: The git clone depth.
+ returned: when configured
+ type: int
+ build_spec:
+ description: The build spec declaration to use for the builds in this build project.
+ returned: always
+ type: str
+ auth:
+ description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
+ returned: when configured
+ type: complex
+ insecure_ssl:
+ description: True if set to ignore SSL warnings.
+ returned: when configured
+ type: bool
+ artifacts:
+ description: Information about the output of build artifacts
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of build artifact.
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Output location for build artifacts.
+ returned: when configured
+ type: str
+ # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
+ cache:
+ description: Cache settings for the build project.
+ returned: when configured
+ type: dict
+ environment:
+ description: Environment settings for the build.
+ returned: always
+ type: dict
+ service_role:
+ description: IAM role to be used during build to access other AWS services.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codebuild-service-role
+ timeout_in_minutes:
+ description: The timeout of a build in minutes.
+ returned: always
+ type: int
+ sample: 60
+ tags:
+ description:
+ - Tags added to the project in the boto3 list of dictionaries format.
+ - I(tags) and I(resource_tags) represent the same information in
+ different formats.
+ returned: when configured
+ type: list
+ resource_tags:
+ description:
+ - A simple dictionary representing the tags added to the project.
+ - I(tags) and I(resource_tags) represent the same information in
+ different formats.
+ returned: when configured
+ type: dict
+ version_added: 4.0.0
+ created:
+ description: Timestamp of the create time of the project.
+ returned: always
+ type: str
+ sample: "2018-04-17T16:56:03.245000+02:00"
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_or_update_project(client, params, module):
+ resp = {}
+ name = params['name']
+ # clean up params
+ formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+ permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
+ permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
+
+ formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
+ formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+
+ # Check if project with that name already exists and if so update existing:
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+
+ if 'name' in found:
+ found_project = found
+ found_tags = found_project.pop('tags', [])
+ # Support tagging using a dict instead of the list of dicts
+ if params['resource_tags'] is not None:
+ if params['purge_tags']:
+ tags = dict()
+ else:
+ tags = boto3_tag_list_to_ansible_dict(found_tags)
+ tags.update(params['resource_tags'])
+ formatted_update_params['tags'] = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value')
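+ # With purge_tags=true the resource_tags dict replaces the existing tag set;
+ # otherwise it is merged over the tags already on the project.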
+
+ resp = update_project(client=client, params=formatted_update_params, module=module)
+ updated_project = resp['project']
+
+ # Prep both dicts for sensible change comparison:
+ found_project.pop('lastModified')
+ updated_project.pop('lastModified')
+ updated_tags = updated_project.pop('tags', [])
+ found_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(found_tags)
+ updated_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(updated_tags)
+
+ if updated_project != found_project:
+ changed = True
+ updated_project['tags'] = updated_tags
+ return resp, changed
+ # Or create new project:
+ try:
+ if params['source'] is None or params['artifacts'] is None:
+ module.fail_json(
+ msg="The source and artifacts parameters must be provided when "
+ "creating a new project. No existing project was found.")
+ resp = client.create_project(**formatted_create_params)
+ changed = True
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+
+
+def update_project(client, params, module):
+ name = params['name']
+
+ try:
+ resp = client.update_project(**params)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+
+
+def delete_project(client, name, module):
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+ if 'name' in found:
+ # Mark as changed when a project with that name existed before calling delete
+ changed = True
+ try:
+ resp = client.delete_project(name=name)
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+
+
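+# describe_project() returns an empty dict when no project matches, which is why
+# callers test for existence with "'name' in found".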
+def describe_project(client, name, module):
+ project = {}
+ try:
+ projects = client.batch_get_projects(names=[name])['projects']
+ if len(projects) > 0:
+ project = projects[0]
+ return project
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
+
+
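+# Example (hypothetical data): a boto3 response like
+# {'project': {'name': 'p', 'tags': [{'key': 'env', 'value': 'dev'}]}} comes back
+# with an added 'resource_tags': {'env': 'dev'} plus the raw response under 'ORIGINAL'.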
+def format_project_result(project_result):
+ formatted_result = camel_dict_to_snake_dict(project_result)
+ project = project_result.get('project', {})
+ if project:
+ tags = project.get('tags', [])
+ formatted_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags)
+ formatted_result['ORIGINAL'] = project_result
+ return formatted_result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ source=dict(type='dict'),
+ artifacts=dict(type='dict'),
+ cache=dict(type='dict'),
+ environment=dict(type='dict'),
+ service_role=dict(),
+ timeout_in_minutes=dict(type='int', default=60),
+ encryption_key=dict(no_log=False),
+ tags=dict(type='list', elements='dict'),
+ resource_tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ vpc_config=dict(type='dict'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codebuild')
+
+ state = module.params.get('state')
+ changed = False
+
+ if module.params['tags']:
+ module.deprecate(
+ 'The tags parameter currently uses a non-standard format and has '
+ 'been deprecated. In release 6.0.0 this parameter will accept '
+ 'a simple key/value pair dictionary instead of the current list '
+ 'of dictionaries. It is recommended to migrate to using the '
+ 'resource_tags parameter which already accepts the simple dictionary '
+ 'format.', version='6.0.0', collection_name='community.aws')
+
+ if state == 'present':
+ project_result, changed = create_or_update_project(
+ client=client_conn,
+ params=module.params,
+ module=module)
+ elif state == 'absent':
+ project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
+
+ formatted_result = format_project_result(project_result)
+ module.exit_json(changed=changed, **formatted_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/codecommit_repository.py b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py
new file mode 100644
index 000000000..fce4d15d6
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: codecommit_repository
+version_added: 1.0.0
+short_description: Manage repositories in AWS CodeCommit
+description:
+ - Supports creation and deletion of CodeCommit repositories.
+ - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit).
+ The usage did not change.
+author: Shuang Wang (@ptux)
+options:
+ name:
+ description:
+ - Name of repository.
+ required: true
+ type: str
+ description:
+ description:
+ - Description or comment of repository.
+ required: false
+ aliases:
+ - comment
+ type: str
+ default: ''
+ state:
+ description:
+ - Specifies the state of repository.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+RETURN = '''
+repository_metadata:
+ description: "Information about the repository."
+ returned: always
+ type: complex
+ contains:
+ account_id:
+ description: "The ID of the AWS account associated with the repository."
+ returned: when state is present
+ type: str
+ sample: "123456789012"
+ arn:
+ description: "The Amazon Resource Name (ARN) of the repository."
+ returned: when state is present
+ type: str
+ sample: "arn:aws:codecommit:ap-northeast-1:123456789012:username"
+ clone_url_http:
+ description: "The URL to use for cloning the repository over HTTPS."
+ returned: when state is present
+ type: str
+ sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
+ clone_url_ssh:
+ description: "The URL to use for cloning the repository over SSH."
+ returned: when state is present
+ type: str
+ sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
+ creation_date:
+ description: "The date and time the repository was created, in timestamp format."
+ returned: when state is present
+ type: str
+ sample: "2018-10-16T13:21:41.261000+09:00"
+ last_modified_date:
+ description: "The date and time the repository was last modified, in timestamp format."
+ returned: when state is present
+ type: str
+ sample: "2018-10-16T13:21:41.261000+09:00"
+ repository_description:
+ description: "A comment or description about the repository."
+ returned: when state is present
+ type: str
+ sample: "test from ptux"
+ repository_id:
+ description: "The ID of the repository that was created or deleted"
+ returned: always
+ type: str
+ sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
+ repository_name:
+ description: "The repository's name."
+ returned: when state is present
+ type: str
+ sample: "reponame"
+
+response_metadata:
+ description: "Information about the response."
+ returned: always
+ type: complex
+ contains:
+ http_headers:
+ description: "http headers of http response"
+ returned: always
+ type: dict
+ http_status_code:
+ description: "http status code of http response"
+ returned: always
+ type: str
+ sample: "200"
+ request_id:
+ description: "http request id"
+ returned: always
+ type: str
+ sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
+ retry_attempts:
+ description: "numbers of retry attempts"
+ returned: always
+ type: str
+ sample: "0"
+'''
+
+EXAMPLES = '''
+# Create a new repository
+- community.aws.codecommit_repository:
+ name: repo
+ state: present
+
+# Delete a repository
+- community.aws.codecommit_repository:
+ name: repo
+ state: absent
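+
+# Create a repository with a description (a usage sketch; the name and text are illustrative)
+- community.aws.codecommit_repository:
+ name: repo
+ description: 'Example repository managed by Ansible'
+ state: present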
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class CodeCommit(object):
+ def __init__(self, module=None):
+ self._module = module
+ self._client = self._module.client('codecommit')
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ result = dict(changed=False)
+
+ if self._module.params['state'] == 'present':
+ if not self._repository_exists():
+ if not self._check_mode:
+ result = self._create_repository()
+ result['changed'] = True
+ else:
+ metadata = self._get_repository()['repositoryMetadata']
+ if not metadata.get('repositoryDescription'):
+ metadata['repositoryDescription'] = ''
+ if metadata['repositoryDescription'] != self._module.params['description']:
+ if not self._check_mode:
+ self._update_repository()
+ result['changed'] = True
+ result.update(self._get_repository())
+ if self._module.params['state'] == 'absent' and self._repository_exists():
+ if not self._check_mode:
+ result = self._delete_repository()
+ result['changed'] = True
+ return result
+
+ def _repository_exists(self):
+ try:
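+ # list_repositories responses are paginated, so walk every page before
+ # concluding that the repository does not exist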
+ paginator = self._client.get_paginator('list_repositories')
+ for page in paginator.paginate():
+ repositories = page['repositories']
+ for item in repositories:
+ if item['repositoryName'] == self._module.params['name']:
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't get repository")
+ return False
+
+ def _get_repository(self):
+ try:
+ result = self._client.get_repository(
+ repositoryName=self._module.params['name']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't get repository")
+ return result
+
+ def _update_repository(self):
+ try:
+ result = self._client.update_repository_description(
+ repositoryName=self._module.params['name'],
+ repositoryDescription=self._module.params['description']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't create repository")
+ return result
+
+ def _create_repository(self):
+ try:
+ result = self._client.create_repository(
+ repositoryName=self._module.params['name'],
+ repositoryDescription=self._module.params['description']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't create repository")
+ return result
+
+ def _delete_repository(self):
+ try:
+ result = self._client.delete_repository(
+ repositoryName=self._module.params['name']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="couldn't delete repository")
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], required=True),
+ description=dict(default='', aliases=['comment'])
+ )
+
+ ansible_aws_module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ aws_codecommit = CodeCommit(module=ansible_aws_module)
+ result = aws_codecommit.process()
+ ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/codepipeline.py b/ansible_collections/community/aws/plugins/modules/codepipeline.py
new file mode 100644
index 000000000..5c5935cb9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/codepipeline.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: codepipeline
+version_added: 1.0.0
+short_description: Create or delete AWS CodePipelines
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html).
+description:
+ - Create or delete a CodePipeline on AWS.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_codepipeline).
+ The usage did not change.
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+options:
+ name:
+ description:
+ - Name of the CodePipeline.
+ required: true
+ type: str
+ role_arn:
+ description:
+ - ARN of the IAM role to use when executing the CodePipeline.
+ required: true
+ type: str
+ artifact_store:
+ description:
+ - Location information where artifacts are stored (on S3). Dictionary with fields type and location.
+ required: true
+ suboptions:
+ type:
+ description:
+ - Type of the artifacts storage (only 'S3' is currently supported).
+ type: str
+ location:
+ description:
+ - Bucket name for artifacts.
+ type: str
+ type: dict
+ stages:
+ description:
+ - List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
+ required: true
+ suboptions:
+ name:
+ description:
+ - Name of the stage (step) in the CodePipeline.
+ type: str
+ actions:
+ description:
+ - List of action configurations for that stage.
+ - 'See the boto3 documentation for full documentation of suboptions:'
+ - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)'
+ type: list
+ elements: dict
+ elements: dict
+ type: list
+ version:
+ description:
+ - Version number of the CodePipeline. This number is automatically incremented when a CodePipeline is updated.
+ required: false
+ type: int
+ state:
+ description:
+ - Create or remove CodePipeline.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
+- community.aws.codepipeline:
+ name: my_deploy_pipeline
+ role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
+ artifact_store:
+ type: S3
+ location: my_s3_codepipline_bucket
+ stages:
+ - name: Get_source
+ actions:
+ -
+ name: Git_pull
+ actionTypeId:
+ category: Source
+ owner: ThirdParty
+ provider: GitHub
+ version: '1'
+ outputArtifacts:
+ - { name: my-app-source }
+ configuration:
+ Owner: mediapeers
+ Repo: my_gh_repo
+ PollForSourceChanges: 'true'
+ Branch: master
+ # Generate token like this:
+ # https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
+ # GH Link: https://github.com/settings/tokens
+ OAuthToken: 'abc123def456'
+ runOrder: 1
+ - name: Build
+ actions:
+ -
+ name: CodeBuild
+ actionTypeId:
+ category: Build
+ owner: AWS
+ provider: CodeBuild
+ version: '1'
+ inputArtifacts:
+ - { name: my-app-source }
+ outputArtifacts:
+ - { name: my-app-build }
+ configuration:
+ # A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
+ ProjectName: codebuild-project-name
+ runOrder: 1
+ - name: ECS_deploy
+ actions:
+ -
+ name: ECS_deploy
+ actionTypeId:
+ category: Deploy
+ owner: AWS
+ provider: ECS
+ version: '1'
+ inputArtifacts:
+ - { name: my-app-build }
+ configuration:
+ # an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
+ ClusterName: ecs-cluster-name
+ ServiceName: ecs-cluster-service-name
+ FileName: imagedefinitions.json
+ region: us-east-1
+ state: present
+'''
+
+RETURN = r'''
+pipeline:
+ description: Returns the dictionary describing the CodePipeline configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodePipeline
+ returned: always
+ type: str
+ sample: my_deploy_pipeline
+ role_arn:
+ description: ARN of the IAM role attached to the CodePipeline
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codepipeline-service-role
+ artifact_store:
+ description: Information about where the build artifacts are stored
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the artifacts store, such as S3
+ returned: always
+ type: str
+ sample: S3
+ location:
+ description: The location of the artifacts storage (s3 bucket name)
+ returned: always
+ type: str
+ sample: my_s3_codepipline_bucket
+ encryption_key:
+ description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
+ returned: when configured
+ type: str
+ stages:
+ description: List of stages configured for this CodePipeline
+ returned: always
+ type: list
+ version:
+ description:
+ - The version number of the CodePipeline.
+ - This number is auto incremented when CodePipeline params are changed.
+ returned: always
+ type: int
+'''
+
+import copy
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
+ pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
+ if version:
+ pipeline_dict['version'] = version
+ try:
+ resp = client.create_pipeline(pipeline=pipeline_dict)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name']))
+
+
+def update_pipeline(client, pipeline_dict, module):
+ try:
+ resp = client.update_pipeline(pipeline=pipeline_dict)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name']))
+
+
+def delete_pipeline(client, name, module):
+ try:
+ resp = client.delete_pipeline(name=name)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name))
+
+
+def describe_pipeline(client, name, version, module):
+ pipeline = {}
+ try:
+ if version is not None:
+ pipeline = client.get_pipeline(name=name, version=version)
+ return pipeline
+ else:
+ pipeline = client.get_pipeline(name=name)
+ return pipeline
+ except is_boto3_error_code('PipelineNotFoundException'):
+ return pipeline
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ role_arn=dict(required=True, type='str'),
+ artifact_store=dict(required=True, type='dict'),
+ stages=dict(required=True, type='list', elements='dict'),
+ version=dict(type='int'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codepipeline')
+
+ state = module.params.get('state')
+ changed = False
+
+ # Determine if the CodePipeline exists
+ found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
+ pipeline_result = {}
+
+ if state == 'present':
+ if 'pipeline' in found_code_pipeline:
+ pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
+ # Update dictionary with provided module params:
+ pipeline_dict['roleArn'] = module.params['role_arn']
+ pipeline_dict['artifactStore'] = module.params['artifact_store']
+ pipeline_dict['stages'] = module.params['stages']
+ if module.params['version'] is not None:
+ pipeline_dict['version'] = module.params['version']
+
+ pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
+
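+ # compare_policies performs a deep comparison of two dicts and returns
+ # True when they differ, so it doubles here as a generic change detector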
+ if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
+ changed = True
+ else:
+ pipeline_result = create_pipeline(
+ client=client_conn,
+ name=module.params['name'],
+ role_arn=module.params['role_arn'],
+ artifact_store=module.params['artifact_store'],
+ stages=module.params['stages'],
+ version=module.params['version'],
+ module=module)
+ changed = True
+ elif state == 'absent':
+ if found_code_pipeline:
+ pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
+ changed = True
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py
new file mode 100644
index 000000000..7b92abb7f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: config_aggregation_authorization
+version_added: 1.0.0
+short_description: Manage cross-account AWS Config authorizations
+description:
+ - Module manages AWS Config aggregation authorizations.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregation_authorization).
+ The usage did not change.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ state:
+ description:
+ - Whether the aggregation authorization should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ authorized_account_id:
+ description:
+ - The 12-digit account ID of the account authorized to aggregate data.
+ type: str
+ required: true
+ authorized_aws_region:
+ description:
+ - The region authorized to collect aggregated data.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Get current account ID
+ community.aws.aws_caller_info:
+ register: whoami
+- community.aws.config_aggregation_authorization:
+ state: present
+ authorized_account_id: '{{ whoami.account }}'
+ authorized_aws_region: us-east-1
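+
+# Remove an authorization that is no longer needed (the account ID is illustrative)
+- community.aws.config_aggregation_authorization:
+ state: absent
+ authorized_account_id: '123456789012'
+ authorized_aws_region: us-east-1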
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def resource_exists(client, module, params):
+ try:
+ current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+ authorization_exists = next(
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+ None
+ )
+ if authorization_exists:
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ return False
+
+
+def create_resource(client, module, params, result):
+ try:
+ response = client.put_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def update_resource(client, module, params, result):
+ current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+ current_params = next(
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+ None
+ )
+
+ del current_params['AggregationAuthorizationArn']
+ del current_params['CreationTime']
+
+ if params != current_params:
+ try:
+ response = client.put_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_aggregation_authorization(
+ AuthorizedAccountId=params['AuthorizedAccountId'],
+ AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'authorized_account_id': dict(type='str', required=True),
+ 'authorized_aws_region': dict(type='str', required=True),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+
+ params = {
+ 'AuthorizedAccountId': module.params.get('authorized_account_id'),
+ 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
+ }
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ resource_status = resource_exists(client, module, params)
+
+ if module.params.get('state') == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if module.params.get('state') == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregator.py b/ansible_collections/community/aws/plugins/modules/config_aggregator.py
new file mode 100644
index 000000000..3dc4c6faa
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/config_aggregator.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: config_aggregator
+version_added: 1.0.0
+short_description: Manage AWS Config aggregations across multiple accounts
+description:
+ - Module manages AWS Config aggregator resources.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregator).
+ The usage did not change.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the configuration aggregator should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ account_sources:
+ description:
+ - Provides a list of source accounts and regions to be aggregated.
+ suboptions:
+ account_ids:
+ description:
+ - A list of 12-digit account IDs of accounts being aggregated.
+ type: list
+ elements: str
+ aws_regions:
+ description:
+ - A list of source regions being aggregated.
+ type: list
+ elements: str
+ all_aws_regions:
+ description:
+ - If true, aggregate existing AWS Config regions and future regions.
+ type: bool
+ type: list
+ elements: dict
+ required: true
+ organization_source:
+ description:
+ - Provides information about the AWS Organization and the regions from which data is aggregated.
+ suboptions:
+ role_arn:
+ description:
+ - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
+ type: str
+ aws_regions:
+ description:
+ - The source regions being aggregated.
+ type: list
+ elements: str
+ all_aws_regions:
+ description:
+ - If true, aggregate existing AWS Config regions and future regions.
+ type: bool
+ type: dict
+ required: true
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create cross-account aggregator
+ community.aws.config_aggregator:
+ name: test_config_aggregator
+ state: present
+ account_sources:
+ - account_ids:
+ - '123456789012'
+ - '234567890123'
+ - '345678901234'
+ all_aws_regions: true
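+
+# A sketch of an organization-wide aggregator; the role ARN is illustrative,
+# and account_sources is passed as an empty list because the module requires it
+- name: Create organization aggregator
+ community.aws.config_aggregator:
+ name: test_org_aggregator
+ state: present
+ account_sources: []
+ organization_source:
+ role_arn: 'arn:aws:iam::123456789012:role/AwsConfigAggregatorRole'
+ all_aws_regions: true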
+'''
+
+RETURN = r'''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+
+def resource_exists(client, module, params):
+ try:
+ aggregator = client.describe_configuration_aggregators(
+ ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+ )
+ return aggregator['ConfigurationAggregators'][0]
+ except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ client.put_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+ AccountAggregationSources=params['AccountAggregationSources'],
+ OrganizationAggregationSource=params['OrganizationAggregationSource']
+ )
+ result['changed'] = True
+ result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def update_resource(client, module, params, result):
+ result['changed'] = False
+
+ current_params = client.describe_configuration_aggregators(
+ ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+ )['ConfigurationAggregators'][0]
+
+ if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []):
+ result['changed'] = True
+
+ if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}):
+ result['changed'] = True
+
+ if result['changed']:
+ try:
+ client.put_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+ AccountAggregationSources=params['AccountAggregationSources'],
+ OrganizationAggregationSource=params['OrganizationAggregationSource']
+ )
+ result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ client.delete_configuration_aggregator(
+ ConfigurationAggregatorName=params['ConfigurationAggregatorName']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'account_sources': dict(type='list', required=True, elements='dict'),
+ 'organization_source': dict(type='dict', required=True)
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['ConfigurationAggregatorName'] = name
+ params['AccountAggregationSources'] = []
+ if module.params.get('account_sources'):
+ for i in module.params.get('account_sources'):
+ tmp_dict = {}
+ if i.get('account_ids'):
+ tmp_dict['AccountIds'] = i.get('account_ids')
+ if i.get('aws_regions'):
+ tmp_dict['AwsRegions'] = i.get('aws_regions')
+ if i.get('all_aws_regions') is not None:
+ tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
+ params['AccountAggregationSources'].append(tmp_dict)
+ if module.params.get('organization_source'):
+ params['OrganizationAggregationSource'] = {}
+ if module.params.get('organization_source').get('role_arn'):
+ params['OrganizationAggregationSource'].update({
+ 'RoleArn': module.params.get('organization_source').get('role_arn')
+ })
+ if module.params.get('organization_source').get('aws_regions'):
+ params['OrganizationAggregationSource'].update({
+ 'AwsRegions': module.params.get('organization_source').get('aws_regions')
+ })
+ if module.params.get('organization_source').get('all_aws_regions') is not None:
+ params['OrganizationAggregationSource'].update({
+ 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
+ })
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py
new file mode 100644
index 000000000..371bd6685
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: config_delivery_channel
+version_added: 1.0.0
+short_description: Manage AWS Config delivery channels
+description:
+ - This module manages AWS Config delivery locations for rule checks and configuration info.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_config_delivery_channel).
+ The usage did not change.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the delivery channel should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ s3_bucket:
+ description:
+ - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
+ type: str
+ required: true
+ s3_prefix:
+ description:
+ - The prefix for the specified Amazon S3 bucket.
+ type: str
+ sns_topic_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
+ type: str
+ delivery_frequency:
+ description:
+ - The frequency with which AWS Config delivers configuration snapshots.
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create Delivery Channel for AWS Config
+ community.aws.config_delivery_channel:
+ name: test_delivery_channel
+ state: present
+ s3_bucket: 'test_aws_config_bucket'
+ sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
+ delivery_frequency: 'Twelve_Hours'
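+
+# A sketch of a channel that writes under an explicit key prefix
+# (the bucket and prefix names are illustrative)
+- name: Create Delivery Channel with an S3 prefix
+ community.aws.config_delivery_channel:
+ name: test_delivery_channel
+ state: present
+ s3_bucket: 'test_aws_config_bucket'
+ s3_prefix: 'config'
+ delivery_frequency: 'Twelve_Hours'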
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+# this waits for an IAM role to become fully available, at the cost of
+# taking a long time to fail when the IAM role/policy really is invalid
+retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['InsufficientDeliveryPolicyException'],
+)
+
+
+def resource_exists(client, module, params):
+ try:
+ channel = client.describe_delivery_channels(
+ DeliveryChannelNames=[params['name']],
+ aws_retry=True,
+ )
+ return channel['DeliveryChannels'][0]
+ except is_boto3_error_code('NoSuchDeliveryChannelException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
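+ # apply the retry decorator defined above to the bound client method,
+ # then invoke the wrapped call with the assembled channel parameters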
+ retry_unavailable_iam_on_put_delivery(
+ client.put_delivery_channel,
+ )(
+ DeliveryChannel=params,
+ )
+ result['changed'] = True
+ result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
+ module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
+ except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
+ "Make sure the bucket exists and is available")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_delivery_channels(
+ DeliveryChannelNames=[params['name']],
+ aws_retry=True,
+ )
+
+ if params != current_params['DeliveryChannels'][0]:
+ try:
+ retry_unavailable_iam_on_put_delivery(
+ client.put_delivery_channel,
+ )(
+ DeliveryChannel=params,
+ )
+ result['changed'] = True
+ result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
+ module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
+ except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
+ "Make sure the bucket exists and is available")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_delivery_channel(
+ DeliveryChannelName=params['name']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 's3_bucket': dict(type='str', required=True),
+ 's3_prefix': dict(type='str'),
+ 'sns_topic_arn': dict(type='str'),
+ 'delivery_frequency': dict(
+ type='str',
+ choices=[
+ 'One_Hour',
+ 'Three_Hours',
+ 'Six_Hours',
+ 'Twelve_Hours',
+ 'TwentyFour_Hours'
+ ]
+ ),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['name'] = name
+ if module.params.get('s3_bucket'):
+ params['s3BucketName'] = module.params.get('s3_bucket')
+ if module.params.get('s3_prefix'):
+ params['s3KeyPrefix'] = module.params.get('s3_prefix')
+ if module.params.get('sns_topic_arn'):
+ params['snsTopicARN'] = module.params.get('sns_topic_arn')
+ if module.params.get('delivery_frequency'):
+ params['configSnapshotDeliveryProperties'] = {
+ 'deliveryFrequency': module.params.get('delivery_frequency')
+ }
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ if resource_status:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/config_recorder.py b/ansible_collections/community/aws/plugins/modules/config_recorder.py
new file mode 100644
index 000000000..d90ce46cd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/config_recorder.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: config_recorder
+version_added: 1.0.0
+short_description: Manage AWS Config Recorders
+description:
+ - Module manages AWS Config configuration recorder settings.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_config_recorder).
+ The usage did not change.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the configuration recorder should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ role_arn:
+ description:
+ - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
+ - Required when I(state=present).
+ type: str
+ recording_group:
+ description:
+ - Specifies the types of AWS resources for which AWS Config records configuration changes.
+ - Required when I(state=present).
+ suboptions:
+ all_supported:
+ description:
+ - Specifies whether AWS Config records configuration changes for every supported type of regional resource.
+ - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts
+ recording resources of that type automatically.
+ - If I(all_supported=true), you cannot enumerate a list of I(resource_types).
+ include_global_types:
+ description:
+ - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
+ with the resources that it records.
+ - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
+ you should consider customizing AWS Config in only one region to record global resources.
+ - If you set I(include_global_types=true), you must also set I(all_supported=true).
+ - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording
+ resources of that type automatically.
+ resource_types:
+ description:
+ - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
+ C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)).
+ - Before you can set this option, you must set I(all_supported=false).
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create Configuration Recorder for AWS Config
+ community.aws.config_recorder:
+ name: test_configuration_recorder
+ state: present
+ role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
+ recording_group:
+ all_supported: true
+ include_global_types: true
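+
+# A sketch recording only selected resource types (the list is illustrative);
+# all_supported must be false before resource_types can be set
+- name: Record only EC2 instances and CloudTrail trails
+ community.aws.config_recorder:
+ name: test_configuration_recorder
+ state: present
+ role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
+ recording_group:
+ all_supported: false
+ include_global_types: false
+ resource_types:
+ - 'AWS::EC2::Instance'
+ - 'AWS::CloudTrail::Trail'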
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def resource_exists(client, module, params):
+ try:
+ recorder = client.describe_configuration_recorders(
+ ConfigurationRecorderNames=[params['name']]
+ )
+ return recorder['ConfigurationRecorders'][0]
+ except is_boto3_error_code('NoSuchConfigurationRecorderException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ response = client.put_configuration_recorder(
+ ConfigurationRecorder=params
+ )
+ result['changed'] = True
+ result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_configuration_recorders(
+ ConfigurationRecorderNames=[params['name']]
+ )
+
+ if params != current_params['ConfigurationRecorders'][0]:
+ try:
+ response = client.put_configuration_recorder(
+ ConfigurationRecorder=params
+ )
+ result['changed'] = True
+ result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_configuration_recorder(
+ ConfigurationRecorderName=params['name']
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
+
+
+def main():
+
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'role_arn': dict(type='str'),
+ 'recording_group': dict(type='dict'),
+ },
+ supports_check_mode=False,
+ required_if=[
+ ('state', 'present', ['role_arn', 'recording_group']),
+ ],
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['name'] = name
+ if module.params.get('role_arn'):
+ params['roleARN'] = module.params.get('role_arn')
+ if module.params.get('recording_group'):
+ params['recordingGroup'] = {}
+ if module.params.get('recording_group').get('all_supported') is not None:
+ params['recordingGroup'].update({
+ 'allSupported': module.params.get('recording_group').get('all_supported')
+ })
+ if module.params.get('recording_group').get('include_global_types') is not None:
+ params['recordingGroup'].update({
+ 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
+ })
+ if module.params.get('recording_group').get('resource_types'):
+ params['recordingGroup'].update({
+ 'resourceTypes': module.params.get('recording_group').get('resource_types')
+ })
+ else:
+ params['recordingGroup'].update({
+ 'resourceTypes': []
+ })
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ resource_status = resource_exists(client, module, params)
+
+ if state == 'present':
+ if not resource_status:
+ create_resource(client, module, params, result)
+ if resource_status:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if resource_status:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/config_rule.py b/ansible_collections/community/aws/plugins/modules/config_rule.py
new file mode 100644
index 000000000..d5cb717fd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/config_rule.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: config_rule
+version_added: 1.0.0
+short_description: Manage AWS Config rule resources
+description:
+ - Module manages AWS Config rules.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_config_rule).
+ The usage did not change.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ name:
+ description:
+ - The name of the AWS Config resource.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the Config rule should be present or absent.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ description:
+ description:
+ - The description that you provide for the AWS Config rule.
+ type: str
+ scope:
+ description:
+ - Defines which resources can trigger an evaluation for the rule.
+ suboptions:
+ compliance_types:
+ description:
+ - The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
+ You can only specify one type if you also specify a resource ID for I(compliance_id).
+ compliance_id:
+ description:
+ - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
+ you must specify one resource type for I(compliance_types).
+ tag_key:
+ description:
+ - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
+ tag_value:
+ description:
+ - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
+ If you specify a value for I(tag_value), you must also specify a value for I(tag_key).
+ type: dict
+ source:
+ description:
+ - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
+ evaluate your AWS resources.
+ suboptions:
+ owner:
+ description:
+ - Indicates whether AWS or the customer owns and manages the AWS Config rule.
+ - Use C(AWS) for managed rules and C(CUSTOM_LAMBDA) for custom Lambda-backed rules.
+ identifier:
+ description:
+ - For AWS managed rules, the predefined identifier of the rule, for example C(S3_BUCKET_PUBLIC_WRITE_PROHIBITED).
+ - For custom rules, the ARN of the rule's AWS Lambda function.
+ details:
+ description:
+ - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
+ - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs.
+ - Key C(EventSource) The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
+ - Key C(MessageType) The type of notification that triggers AWS Config to run an evaluation for a rule.
+ - Key C(MaximumExecutionFrequency) The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
+ type: dict
+ required: true
+ input_parameters:
+ description:
+ - A string, in JSON format, that is passed to the AWS Config rule Lambda function.
+ type: str
+ execution_frequency:
+ description:
+ - The maximum frequency with which AWS Config runs evaluations for a rule.
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create Config Rule for AWS Config
+ community.aws.config_rule:
+ name: test_config_rule
+ state: present
+ description: 'This AWS Config rule checks for public write access on S3 buckets'
+ scope:
+ compliance_types:
+ - 'AWS::S3::Bucket'
+ source:
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
+
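+# A sketch of a periodic custom rule backed by a Lambda function
+# (the function ARN is illustrative)
+- name: Create custom Config rule
+ community.aws.config_rule:
+ name: test_custom_rule
+ state: present
+ source:
+ owner: CUSTOM_LAMBDA
+ identifier: 'arn:aws:lambda:us-east-1:123456789012:function:test_config_rule'
+ details:
+ - EventSource: 'aws.config'
+ MessageType: 'ScheduledNotification'
+ MaximumExecutionFrequency: 'Twelve_Hours'
+ execution_frequency: 'Twelve_Hours'
+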
+'''
+
+RETURN = '''#'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def rule_exists(client, module, params):
+ try:
+ rule = client.describe_config_rules(
+ ConfigRuleNames=[params['ConfigRuleName']],
+ aws_retry=True,
+ )
+ return rule['ConfigRules'][0]
+ except is_boto3_error_code('NoSuchConfigRuleException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+ try:
+ client.put_config_rule(
+ ConfigRule=params
+ )
+ result['changed'] = True
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
+
+
+def update_resource(client, module, params, result):
+ current_params = client.describe_config_rules(
+ ConfigRuleNames=[params['ConfigRuleName']],
+ aws_retry=True,
+ )
+
+ # drop server-generated fields so the comparison below only considers
+ # user-configurable settings; EvaluationModes may be absent on older APIs
+ current_params['ConfigRules'][0].pop('ConfigRuleArn', None)
+ current_params['ConfigRules'][0].pop('ConfigRuleId', None)
+ current_params['ConfigRules'][0].pop('EvaluationModes', None)
+
+ if params != current_params['ConfigRules'][0]:
+ try:
+ client.put_config_rule(
+ ConfigRule=params
+ )
+ result['changed'] = True
+ result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params))
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
+
+
+def delete_resource(client, module, params, result):
+ try:
+ response = client.delete_config_rule(
+ ConfigRuleName=params['ConfigRuleName'],
+ aws_retry=True,
+ )
+ result['changed'] = True
+ result['rule'] = {}
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete AWS Config rule")
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'state': dict(type='str', choices=['present', 'absent'], default='present'),
+ 'description': dict(type='str'),
+ 'scope': dict(type='dict'),
+ 'source': dict(type='dict', required=True),
+ 'input_parameters': dict(type='str'),
+ 'execution_frequency': dict(
+ type='str',
+ choices=[
+ 'One_Hour',
+ 'Three_Hours',
+ 'Six_Hours',
+ 'Twelve_Hours',
+ 'TwentyFour_Hours'
+ ]
+ ),
+ },
+ supports_check_mode=False,
+ )
+
+ result = {
+ 'changed': False
+ }
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ params = {}
+ if name:
+ params['ConfigRuleName'] = name
+ if module.params.get('description'):
+ params['Description'] = module.params.get('description')
+ if module.params.get('scope'):
+ params['Scope'] = {}
+ if module.params.get('scope').get('compliance_types'):
+ params['Scope'].update({
+ 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
+ })
+ if module.params.get('scope').get('tag_key'):
+ params['Scope'].update({
+ 'TagKey': module.params.get('scope').get('tag_key')
+ })
+ if module.params.get('scope').get('tag_value'):
+ params['Scope'].update({
+ 'TagValue': module.params.get('scope').get('tag_value')
+ })
+ if module.params.get('scope').get('compliance_id'):
+ params['Scope'].update({
+ 'ComplianceResourceId': module.params.get('scope').get('compliance_id')
+ })
+ if module.params.get('source'):
+ params['Source'] = {}
+ if module.params.get('source').get('owner'):
+ params['Source'].update({
+ 'Owner': module.params.get('source').get('owner')
+ })
+ if module.params.get('source').get('identifier'):
+ params['Source'].update({
+ 'SourceIdentifier': module.params.get('source').get('identifier')
+ })
+ if module.params.get('source').get('details'):
+ params['Source'].update({
+ 'SourceDetails': module.params.get('source').get('details')
+ })
+ if module.params.get('input_parameters'):
+ params['InputParameters'] = module.params.get('input_parameters')
+ if module.params.get('execution_frequency'):
+ params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
+ params['ConfigRuleState'] = 'ACTIVE'
+
+ client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+ existing_rule = rule_exists(client, module, params)
+
+ if state == 'present':
+ if not existing_rule:
+ create_resource(client, module, params, result)
+ else:
+ update_resource(client, module, params, result)
+
+ if state == 'absent':
+ if existing_rule:
+ delete_resource(client, module, params, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/data_pipeline.py b/ansible_collections/community/aws/plugins/modules/data_pipeline.py
new file mode 100644
index 000000000..fc441c10c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/data_pipeline.py
@@ -0,0 +1,634 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: data_pipeline
+version_added: 1.0.0
+author:
+ - Raghu Udiyar (@raags) <raghusiddarth@gmail.com>
+ - Sloane Hertel (@s-hertel) <shertel@redhat.com>
+short_description: Create and manage AWS Datapipelines
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+description:
+ - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects)
+ given to the datapipeline.
+ - The pipeline definition must be in the format given here
+ U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax).
+ - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state.
+options:
+ name:
+ description:
+ - The name of the Data Pipeline to create/modify/delete.
+ required: true
+ type: str
+ description:
+ description:
+ - An optional description for the pipeline being created.
+ default: ''
+ type: str
+ objects:
+ type: list
+ elements: dict
+ default: []
+ description:
+ - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields).
+ suboptions:
+ id:
+ description:
+ - The ID of the object.
+ type: str
+ name:
+ description:
+ - The name of the object.
+ type: str
+ fields:
+ description:
+ - Key-value pairs that define the properties of the object.
+ - The value is specified as a reference to another object I(refValue) or as a string value I(stringValue)
+ but not as both.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ type: str
+ description:
+ - The field identifier.
+ stringValue:
+ type: str
+ description:
+ - The field value.
+ - Exactly one of I(stringValue) and I(refValue) may be specified.
+ refValue:
+ type: str
+ description:
+ - The field value, expressed as the identifier of another object.
+ - Exactly one of I(stringValue) and I(refValue) may be specified.
+ parameters:
+ description:
+ - A list of parameter objects (dicts) in the pipeline definition.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ id:
+ description:
+ - The ID of the parameter object.
+ type: str
+ attributes:
+ description:
+ - A list of attributes (dicts) of the parameter object.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The field identifier.
+ type: str
+ stringValue:
+ description: The field value.
+ type: str
+
+ values:
+ description:
+ - A list of parameter values (dicts) in the pipeline definition.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ id:
+ description: The ID of the parameter value.
+ type: str
+ stringValue:
+ description: The field value.
+ type: str
+ timeout:
+ description:
+ - Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise.
+ default: 300
+ type: int
+ state:
+ description:
+ - The requested state of the pipeline.
+ choices: ['present', 'absent', 'active', 'inactive']
+ default: present
+ type: str
+ tags:
+ description:
+ - A dict of key:value pair(s) to add to the pipeline.
+ type: dict
+ default: {}
+ aliases: ['resource_tags']
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ objects: "{{pipelineObjects}}"
+ parameters: "{{pipelineParameters}}"
+ values: "{{pipelineValues}}"
+ tags:
+ key1: val1
+ key2: val2
+ state: present
+
+# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects
+- community.aws.data_pipeline:
+ name: test-dp
+ objects:
+ - "id": "DefaultSchedule"
+ "name": "Every 1 day"
+ "fields":
+ - "key": "period"
+ "stringValue": "1 days"
+ - "key": "type"
+ "stringValue": "Schedule"
+ - "key": "startAt"
+ "stringValue": "FIRST_ACTIVATION_DATE_TIME"
+ - "id": "Default"
+ "name": "Default"
+ "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
+ { "key": "role", "stringValue": "DataPipelineDefaultRole" },
+ { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
+ { "key": "scheduleType", "stringValue": "cron" },
+ { "key": "schedule", "refValue": "DefaultSchedule" },
+ { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
+ state: active
+
+# Activate pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ state: active
+
+# Delete pipeline
+- community.aws.data_pipeline:
+ name: test-dp
+ region: us-west-2
+ state: absent
+
+'''
+
+RETURN = r'''
+changed:
+ description: Whether the data pipeline has been modified.
+ type: bool
+ returned: always
+ sample:
+ changed: true
+result:
+ description:
+ - Contains the data pipeline data (data_pipeline) and a return message (msg).
+ If the data pipeline exists data_pipeline will contain the keys description, name,
+ pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then
+ data_pipeline will be an empty dict. The msg describes the status of the operation.
+ returned: always
+ type: dict
+'''
+
+import hashlib
+import json
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED']
+DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
+DP_ACTIVATING_STATE = 'ACTIVATING'
+DP_DEACTIVATING_STATE = 'DEACTIVATING'
+PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$'
+
+
+class DataPipelineNotFound(Exception):
+ pass
+
+
+class TimeOutException(Exception):
+ pass
+
+
+def pipeline_id(client, name):
+ """Return pipeline id for the given pipeline name
+
+ :param object client: boto3 datapipeline client
+ :param string name: pipeline name
+ :returns: pipeline id
+ :raises: DataPipelineNotFound
+
+ """
+ pipelines = client.list_pipelines()
+ for dp in pipelines['pipelineIdList']:
+ if dp['name'] == name:
+ return dp['id']
+ raise DataPipelineNotFound
+
+
+def pipeline_description(client, dp_id):
+ """Return pipeline description list
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :returns: pipeline description dictionary
+ :raises: DataPipelineNotFound
+
+ """
+ try:
+ return client.describe_pipelines(pipelineIds=[dp_id])
+ except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']):
+ raise DataPipelineNotFound
+
+
+def pipeline_field(client, dp_id, field):
+ """Return a pipeline field from the pipeline description.
+
+ The available fields are listed in describe_pipelines output.
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :param string field: pipeline description field
+ :returns: pipeline field information
+
+ """
+ dp_description = pipeline_description(client, dp_id)
+ for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
+ if field_key['key'] == field:
+ return field_key['stringValue']
+ raise KeyError("Field key {0} not found!".format(field))
+
+
+def run_with_timeout(timeout, func, *func_args, **func_kwargs):
+ """Run func with the provided args and kwargs, and wait until
+ timeout for truthy return value
+
+ :param int timeout: time to wait for status
+ :param function func: function to run, should return True or False
+ :param args func_args: function args to pass to func
+ :param kwargs func_kwargs: function key word args
+ :returns: True if func returns truthy within timeout
+ :raises: TimeOutException
+
+ """
+
+ for count in range(timeout // 10):
+ if func(*func_args, **func_kwargs):
+ return True
+ else:
+ # check every 10s
+ time.sleep(10)
+
+ raise TimeOutException
+
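+# Illustrative usage (assumed values): wait up to 300 seconds for a pipeline to
+# reach an active state, polling every 10 seconds:
+#
+# run_with_timeout(300, check_dp_status, client, dp_id, DP_ACTIVE_STATES)
+#
+# pipeline_status_timeout() below makes exactly this call.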
+
+def check_dp_exists(client, dp_id):
+ """Check if datapipeline exists
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :returns: True or False
+
+ """
+ try:
+ # pipeline_description raises DataPipelineNotFound
+ if pipeline_description(client, dp_id):
+ return True
+ else:
+ return False
+ except DataPipelineNotFound:
+ return False
+
+
+def check_dp_status(client, dp_id, status):
+ """Checks if datapipeline matches states in status list
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :param list status: list of states to check against
+ :returns: True or False
+
+ """
+ if not isinstance(status, list):
+ raise AssertionError()
+ if pipeline_field(client, dp_id, field="@pipelineState") in status:
+ return True
+ else:
+ return False
+
+
+def pipeline_status_timeout(client, dp_id, status, timeout):
+ args = (client, dp_id, status)
+ return run_with_timeout(timeout, check_dp_status, *args)
+
+
+def pipeline_exists_timeout(client, dp_id, timeout):
+ args = (client, dp_id)
+ return run_with_timeout(timeout, check_dp_exists, *args)
+
+
+def activate_pipeline(client, module):
+ """Activates pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ except DataPipelineNotFound:
+ module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+ if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
+ changed = False
+ else:
+ try:
+ client.activate_pipeline(pipelineId=dp_id)
+ except is_boto3_error_code('InvalidRequestException'):
+ module.fail_json(msg="You need to populate your pipeline before activation.")
+ try:
+ pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
+ timeout=timeout)
+ except TimeOutException:
+ if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+ # the pipeline activated but finished before an active state could be observed
+ pass
+ else:
+ module.fail_json(msg=('Data Pipeline {0} failed to activate '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ changed = True
+
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
+
+ return (changed, result)
+
+
+def deactivate_pipeline(client, module):
+ """Deactivates pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ except DataPipelineNotFound:
+ module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+ if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
+ changed = False
+ else:
+ client.deactivate_pipeline(pipelineId=dp_id)
+ try:
+ pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
+ timeout=timeout)
+ except TimeOutException:
+ module.fail_json(msg=('Data Pipeline {0} failed to deactivate '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ changed = True
+
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
+
+ return (changed, result)
+
+
+def _delete_dp_with_check(dp_id, client, timeout):
+ client.delete_pipeline(pipelineId=dp_id)
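+ # Note: PIPELINE_DOESNT_EXIST never matches a real pipeline state, so the
+ # wait below only succeeds when pipeline_description() raises
+ # DataPipelineNotFound, i.e. once the deletion has completed.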
+ try:
+ pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout)
+ except DataPipelineNotFound:
+ return True
+
+
+def delete_pipeline(client, module):
+ """Deletes pipeline
+
+ """
+ dp_name = module.params.get('name')
+ timeout = module.params.get('timeout')
+
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ _delete_dp_with_check(dp_id, client, timeout)
+ changed = True
+ except DataPipelineNotFound:
+ changed = False
+ except TimeOutException:
+ module.fail_json(msg=('Data Pipeline {0} failed to delete '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ result = {'data_pipeline': {},
+ 'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
+
+ return (changed, result)
+
+
+def build_unique_id(module):
+ data = dict(module.params)
+ # remove objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline
+ for each in ('objects', 'timeout'):
+ data.pop(each, None)
+ json_data = json.dumps(data, sort_keys=True).encode("utf-8")
+ hashed_data = hashlib.md5(json_data).hexdigest()
+ return hashed_data
+
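+# Illustrative example (not part of the module): two runs with identical
+# options produce the same unique id, while changing any hashed option yields
+# a new id and therefore a new pipeline:
+#
+# build_unique_id(module) # e.g. '5d41402abc4b2a76b9719d911017c592'
+#
+# Only 'objects' and 'timeout' are excluded from the hash.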
+
+def format_tags(tags):
+ """ Reformats tags
+
+ :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
+ :returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
+
+ """
+ return [dict(key=k, value=v) for k, v in tags.items()]
+
+
+def get_result(client, dp_id):
+ """ Get the current state of the data pipeline and reformat it to snake_case for exit_json
+
+ :param object client: boto3 datapipeline client
+ :param string dp_id: pipeline id
+ :returns: reformatted dict of pipeline description
+
+ """
+ # pipeline_description returns a pipelineDescriptionList of length 1
+ # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict)
+ dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
+
+ # Get uniqueId and pipelineState in fields to add to the exit_json result
+ dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
+ dp["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState")
+
+ # Remove fields; can't make a list snake_case and most of the data is redundant
+ del dp["fields"]
+
+ # Note: tags is already formatted fine so we don't need to do anything with it
+
+ # Reformat data pipeline and add reformatted fields back
+ dp = camel_dict_to_snake_dict(dp)
+ return dp
+
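+# Sketch of the dict returned by get_result() (field values are illustrative):
+#
+# {'description': '...', 'name': 'test-dp', 'pipeline_id': 'df-0123456789ABC',
+# 'tags': {'key1': 'val1'}, 'unique_id': '...', 'pipeline_state': 'PENDING'}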
+
+def diff_pipeline(client, module, objects, unique_id, dp_name):
+ """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated
+ """
+ result = {}
+ changed = False
+ create_dp = False
+
+ # See if there is already a pipeline with the same unique_id
+ try:
+ dp_id = pipeline_id(client, dp_name)
+ dp_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId"))
+ if dp_unique_id != unique_id:
+ # A change is expected but not determined. Updated to a bool in create_pipeline().
+ changed = "NEW_VERSION"
+ create_dp = True
+ # Unique ids are the same - check if pipeline needs modification
+ else:
+ dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
+ # Definition needs to be updated
+ if dp_objects != objects:
+ changed, msg = define_pipeline(client, module, objects, dp_id)
+ # No changes
+ else:
+ msg = 'Data Pipeline {0} is present'.format(dp_name)
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': msg}
+ except DataPipelineNotFound:
+ create_dp = True
+
+ return create_dp, changed, result
+
+
+def define_pipeline(client, module, objects, dp_id):
+ """Puts pipeline definition
+
+ """
+ dp_name = module.params.get('name')
+
+ if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+ msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
+ changed = False
+
+ elif objects:
+ parameters = module.params.get('parameters')
+ values = module.params.get('values')
+
+ try:
+ client.put_pipeline_definition(pipelineId=dp_id,
+ pipelineObjects=objects,
+ parameterObjects=parameters,
+ parameterValues=values)
+ msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. Check that string/reference fields"
+ "are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
+ "objects".format(dp_name))
+ else:
+ changed = False
+ msg = ""
+
+ return changed, msg
+
+
+def create_pipeline(client, module):
+ """Creates datapipeline. Uses uniqueId to achieve idempotency.
+
+ """
+ dp_name = module.params.get('name')
+ objects = module.params.get('objects', None)
+ description = module.params.get('description', '')
+ tags = module.params.get('tags')
+ timeout = module.params.get('timeout')
+
+ unique_id = build_unique_id(module)
+ create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
+
+ if changed == "NEW_VERSION":
+ # delete old version
+ changed, creation_result = delete_pipeline(client, module)
+
+ # There isn't a pipeline or it has different parameters than the pipeline in existence.
+ if create_dp:
+ # Make pipeline
+ try:
+ tags = format_tags(tags)
+ dp = client.create_pipeline(name=dp_name,
+ uniqueId=unique_id,
+ description=description,
+ tags=tags)
+ dp_id = dp['pipelineId']
+ pipeline_exists_timeout(client, dp_id, timeout)
+ except TimeOutException:
+ module.fail_json(msg=('Data Pipeline {0} failed to create '
+ 'within timeout {1} seconds').format(dp_name, timeout))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name))
+ # Put pipeline definition
+ changed, msg = define_pipeline(client, module, objects, dp_id)
+
+ changed = True
+ data_pipeline = get_result(client, dp_id)
+ result = {'data_pipeline': data_pipeline,
+ 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
+
+ return (changed, result)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(required=False, default=''),
+ objects=dict(required=False, type='list', default=[], elements='dict'),
+ parameters=dict(required=False, type='list', default=[], elements='dict'),
+ timeout=dict(required=False, type='int', default=300),
+ state=dict(default='present', choices=['present', 'absent',
+ 'active', 'inactive']),
+ tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']),
+ values=dict(required=False, type='list', default=[], elements='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ try:
+ client = module.client('datapipeline')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ if state == 'present':
+ changed, result = create_pipeline(client, module)
+ elif state == 'absent':
+ changed, result = delete_pipeline(client, module)
+ elif state == 'active':
+ changed, result = activate_pipeline(client, module)
+ elif state == 'inactive':
+ changed, result = deactivate_pipeline(client, module)
+
+ module.exit_json(result=result, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py
new file mode 100644
index 000000000..45180ac6c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: directconnect_confirm_connection
+short_description: Confirms the creation of a hosted DirectConnect connection
+description:
+ - Confirms the creation of a hosted DirectConnect connection, which requires approval before it can be used.
+ - DirectConnect connections that require approval start in the C(ordering) state.
+ - After confirmation, they will move to the C(pending) state and finally the C(available) state.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_confirm_connection).
+ The usage did not change.
+author:
+ - "Matt Traynham (@mtraynham)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+options:
+ name:
+ description:
+ - The name of the Direct Connect connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ connection_id:
+ description:
+ - The ID of the Direct Connect connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+'''
+
+EXAMPLES = '''
+
+# confirm a Direct Connect by name
+- name: confirm the connection by name
+ community.aws.directconnect_confirm_connection:
+ name: my_host_direct_connect
+
+# confirm a Direct Connect by connection_id
+- name: confirm the connection by connection_id
+ community.aws.directconnect_confirm_connection:
+ connection_id: dxcon-xxxxxxxx
+'''
+
+RETURN = '''
+
+connection_state:
+ description: The state of the connection.
+ returned: always
+ type: str
+ sample: pending
+'''
+
+import traceback
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def describe_connections(client, params):
+ return client.describe_connections(**params)
+
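+# The decorator above is equivalent to wrapping the call inline, e.g.:
+#
+# AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params)
+#
+# which is the style used by directconnect_connection.py in this collection.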
+
+def find_connection_id(client, connection_id=None, connection_name=None):
+ params = {}
+ if connection_id:
+ params['connectionId'] = connection_id
+ try:
+ response = describe_connections(client, params)
+ except (BotoCoreError, ClientError) as e:
+ if connection_id:
+ msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ else:
+ msg = "Failed to describe DirectConnect connections"
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = []
+ if len(response.get('connections', [])) == 1 and connection_id:
+ if response['connections'][0]['connectionState'] != 'deleted':
+ match.append(response['connections'][0]['connectionId'])
+
+ for conn in response.get('connections', []):
+ if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
+ match.append(conn['connectionId'])
+
+ if len(match) == 1:
+ return match[0]
+ else:
+ raise DirectConnectError(msg="Could not find a valid DirectConnect connection")
+
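+# Resolution rules for find_connection_id(): a connection_id match or a unique
+# connection_name match (with state != 'deleted') returns the connection ID;
+# zero or multiple matches raise DirectConnectError.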
+
+def get_connection_state(client, connection_id):
+ try:
+ response = describe_connections(client, dict(connectionId=connection_id))
+ return response['connections'][0]['connectionState']
+ except (BotoCoreError, ClientError, IndexError) as e:
+ raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def main():
+ argument_spec = dict(
+ connection_id=dict(),
+ name=dict()
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['connection_id', 'name']],
+ required_one_of=[['connection_id', 'name']])
+ client = module.client('directconnect')
+
+ connection_id = module.params['connection_id']
+ connection_name = module.params['name']
+
+ changed = False
+ connection_state = None
+ try:
+ connection_id = find_connection_id(client,
+ connection_id,
+ connection_name)
+ connection_state = get_connection_state(client, connection_id)
+ if connection_state == 'ordering':
+ client.confirm_connection(connectionId=connection_id)
+ changed = True
+ connection_state = get_connection_state(client, connection_id)
+ except DirectConnectError as e:
+ if e.last_traceback:
+ module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, connection_state=connection_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py
new file mode 100644
index 000000000..28d86717d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: directconnect_connection
+version_added: 1.0.0
+short_description: Creates, deletes, modifies a DirectConnect connection
+description:
+ - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
+ - Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
+ - The connection may later be associated or disassociated with a link aggregation group.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_connection).
+ The usage did not change.
+author:
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+options:
+ state:
+ description:
+ - The state of the Direct Connect connection.
+ choices:
+ - present
+ - absent
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the Direct Connect connection. This is required to create a
+ new connection.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ connection_id:
+ description:
+ - The ID of the Direct Connect connection.
+ - Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID.
+ - One of I(connection_id) or I(name) must be specified.
+ type: str
+ location:
+ description:
+ - Where the Direct Connect connection is located.
+ - Required when I(state=present).
+ type: str
+ bandwidth:
+ description:
+ - The bandwidth of the Direct Connect connection.
+ - Required when I(state=present).
+ choices:
+ - 1Gbps
+ - 10Gbps
+ type: str
+ link_aggregation_group:
+ description:
+ - The ID of the link aggregation group you want to associate with the connection.
+ - This is optional when a stand-alone connection is desired.
+ type: str
+ forced_update:
+ description:
+ - To modify I(bandwidth) or I(location) the connection needs to be deleted and recreated.
+ - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location).
+ type: bool
+ default: false
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect connection
+- community.aws.directconnect_connection:
+ name: ansible-test-connection
+ state: present
+ location: EqDC2
+ link_aggregation_group: dxlag-xxxxxxxx
+ bandwidth: 1Gbps
+ register: dc
+
+# disassociate the LAG from the connection
+- community.aws.directconnect_connection:
+ state: present
+ connection_id: "{{ dc.connection.connection_id }}"
+ location: EqDC2
+ bandwidth: 1Gbps
+
+# replace the connection with one with more bandwidth
+- community.aws.directconnect_connection:
+ state: present
+ name: ansible-test-connection
+ location: EqDC2
+ bandwidth: 10Gbps
+ forced_update: true
+
+# delete the connection
+- community.aws.directconnect_connection:
+ state: absent
+ name: ansible-test-connection
+"""
+
+RETURN = """
+connection:
+ description: The attributes of the direct connect connection.
+ type: complex
+ returned: I(state=present)
+ contains:
+ aws_device:
+ description: The endpoint which the physical connection terminates on.
+ returned: when the requested state is no longer 'requested'
+ type: str
+ sample: EqDC2-12pmo7hemtz1z
+ bandwidth:
+ description: The bandwidth of the connection.
+ returned: always
+ type: str
+ sample: 1Gbps
+ connection_id:
+ description: The ID of the connection.
+ returned: always
+ type: str
+ sample: dxcon-ffy9ywed
+ connection_name:
+ description: The name of the connection.
+ returned: always
+ type: str
+ sample: ansible-test-connection
+ connection_state:
+ description: The state of the connection.
+ returned: always
+ type: str
+ sample: pending
+ loa_issue_time:
+ description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
+ returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
+ type: str
+ sample: '2018-03-20T17:36:26-04:00'
+ location:
+ description: The location of the connection.
+ returned: always
+ type: str
+ sample: EqDC2
+ owner_account:
+ description: The account that owns the direct connect connection.
+ returned: always
+ type: str
+ sample: '123456789012'
+ region:
+ description: The region in which the connection exists.
+ returned: always
+ type: str
+ sample: us-east-1
+"""
+
+import traceback
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
+
+
+def connection_status(client, connection_id):
+ return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
+
+
+def connection_exists(client, connection_id=None, connection_name=None, verify=True):
+ params = {}
+ if connection_id:
+ params['connectionId'] = connection_id
+ try:
+ response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params)
+ except (BotoCoreError, ClientError) as e:
+ if connection_id:
+ msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ else:
+ msg = "Failed to describe DirectConnect connections"
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = []
+ connection = []
+
+ # look for matching connections
+
+ if len(response.get('connections', [])) == 1 and connection_id:
+ if response['connections'][0]['connectionState'] != 'deleted':
+ match.append(response['connections'][0]['connectionId'])
+ connection.extend(response['connections'])
+
+ for conn in response.get('connections', []):
+ if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
+ match.append(conn['connectionId'])
+ connection.append(conn)
+
+ # verifying if the connections exists; if true, return connection identifier, otherwise return False
+ if verify and len(match) == 1:
+ return match[0]
+ elif verify:
+ return False
+ # not verifying if the connection exists; just return current connection info
+ elif len(connection) == 1:
+ return {'connection': connection[0]}
+ return {'connection': {}}
+
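+# connection_exists() is used in two modes (illustrative):
+#
+# connection_exists(client, connection_name='foo') # -> 'dxcon-...' or False
+# connection_exists(client, connection_id='dxcon-...', verify=False) # -> {'connection': {...}}
+#
+# connection_status() above is simply the verify=False form.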
+
+def create_connection(client, location, bandwidth, name, lag_id):
+ if not name:
+ raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
+ params = {
+ 'location': location,
+ 'bandwidth': bandwidth,
+ 'connectionName': name,
+ }
+ if lag_id:
+ params['lagId'] = lag_id
+
+ try:
+ connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params)
+ except (BotoCoreError, ClientError) as e:
+ raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ return connection['connectionId']
+
+
+def changed_properties(current_status, location, bandwidth):
+ current_bandwidth = current_status['bandwidth']
+ current_location = current_status['location']
+
+ return current_bandwidth != bandwidth or current_location != location
+
+
+@AWSRetry.jittered_backoff(**retry_params)
+def update_associations(client, latest_state, connection_id, lag_id):
+ changed = False
+ if 'lagId' in latest_state and lag_id != latest_state['lagId']:
+ disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
+ changed = True
+ if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
+ associate_connection_and_lag(client, connection_id, lag_id)
+ changed = True
+ return changed
+
+
+def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
+ # the connection is found; get the latest state and see if it needs to be updated
+ if connection_id:
+ latest_state = connection_status(client, connection_id=connection_id)['connection']
+ if changed_properties(latest_state, location, bandwidth) and forced_update:
+ ensure_absent(client, connection_id)
+ return ensure_present(client=client,
+ connection_id=None,
+ connection_name=connection_name,
+ location=location,
+ bandwidth=bandwidth,
+ lag_id=lag_id,
+ forced_update=forced_update)
+ elif update_associations(client, latest_state, connection_id, lag_id):
+ return True, connection_id
+
+ # no connection found; create a new one
+ else:
+ return True, create_connection(client, location, bandwidth, connection_name, lag_id)
+
+ return False, connection_id
+
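+# Note: when forced_update replaces a connection, the connection_id returned
+# here is new, matching the behaviour documented for the connection_id option.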
+
+@AWSRetry.jittered_backoff(**retry_params)
+def ensure_absent(client, connection_id):
+ changed = False
+ if connection_id:
+ delete_connection(client, connection_id)
+ changed = True
+
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(),
+ location=dict(),
+ bandwidth=dict(choices=['1Gbps', '10Gbps']),
+ link_aggregation_group=dict(),
+ connection_id=dict(),
+ forced_update=dict(type='bool', default=False)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('connection_id', 'name')],
+ required_if=[('state', 'present', ('location', 'bandwidth'))]
+ )
+
+ connection = module.client('directconnect')
+
+ state = module.params.get('state')
+ try:
+ connection_id = connection_exists(
+ connection,
+ connection_id=module.params.get('connection_id'),
+ connection_name=module.params.get('name')
+ )
+ if not connection_id and module.params.get('connection_id'):
+ module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
+
+ if state == 'present':
+ changed, connection_id = ensure_present(connection,
+ connection_id=connection_id,
+ connection_name=module.params.get('name'),
+ location=module.params.get('location'),
+ bandwidth=module.params.get('bandwidth'),
+ lag_id=module.params.get('link_aggregation_group'),
+ forced_update=module.params.get('forced_update'))
+ response = connection_status(connection, connection_id)
+ elif state == 'absent':
+ changed = ensure_absent(connection, connection_id)
+ response = {}
+ except DirectConnectError as e:
+ if e.last_traceback:
+ module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py
new file mode 100644
index 000000000..1433b387b
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: directconnect_gateway
+author:
+ - Gobin Sougrakpam (@gobins)
+version_added: 1.0.0
+short_description: Manage AWS Direct Connect gateway
+description:
+ - Creates AWS Direct Connect Gateway.
+ - Deletes AWS Direct Connect Gateway.
+ - Attaches Virtual Gateways to Direct Connect Gateway.
+ - Detaches Virtual Gateways from Direct Connect Gateway.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway).
+ The usage did not change.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+options:
+ state:
+ description:
+ - Set I(state=present) to ensure a resource is created.
+ - Set I(state=absent) to remove a resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the Direct Connect Gateway to be created or deleted.
+ type: str
+ amazon_asn:
+ description:
+ - The Amazon side ASN.
+ - Required when I(state=present).
+ type: str
+ direct_connect_gateway_id:
+ description:
+ - The ID of an existing Direct Connect Gateway.
+ - Required when I(state=absent).
+ type: str
+ virtual_gateway_id:
+ description:
+ - The VPN gateway ID of an existing virtual gateway.
+ type: str
+ wait_timeout:
+ description:
+ - How long to wait for the association to be deleted.
+ type: int
+ default: 320
+'''
+
+EXAMPLES = '''
+- name: Create a new direct connect gateway attached to virtual private gateway
+ community.aws.directconnect_gateway:
+ state: present
+ name: my-dx-gateway
+ amazon_asn: 7224
+ virtual_gateway_id: vpg-12345
+ register: created_dxgw
+
+- name: Create a new unattached dxgw
+ community.aws.directconnect_gateway:
+ state: present
+ name: my-dx-gateway
+ amazon_asn: 7224
+ register: created_dxgw
+'''
+
+RETURN = '''
+result:
+ description:
+ - The attributes of the Direct Connect Gateway
+ type: complex
+ returned: I(state=present)
+ contains:
+ amazon_side_asn:
+ description: ASN on the amazon side.
+ type: str
+ direct_connect_gateway_id:
+ description: The ID of the direct connect gateway.
+ type: str
+ direct_connect_gateway_name:
+ description: The name of the direct connect gateway.
+ type: str
+ direct_connect_gateway_state:
+ description: The state of the direct connect gateway.
+ type: str
+ owner_account:
+ description: The AWS account ID of the owner of the direct connect gateway.
+ type: str
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def dx_gateway_info(client, gateway_id, module):
+ try:
+ resp = client.describe_direct_connect_gateways(
+ directConnectGatewayId=gateway_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to fetch gateway information.")
+ if resp['directConnectGateways']:
+ return resp['directConnectGateways'][0]
+
+
+def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
+ polling_increment_secs = 15
+ max_retries = 3
+ status_achieved = False
+
+ for x in range(0, max_retries):
+ try:
+ response = check_dxgw_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=virtual_gateway_id)
+ if response['directConnectGatewayAssociations']:
+ if response['directConnectGatewayAssociations'][0]['associationState'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ else:
+ status_achieved = True
+ break
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while waiting for gateway association.")
+
+ result = response
+ return status_achieved, result
+
+
+def associate_direct_connect_gateway(client, module, gateway_id):
+ params = dict()
+ params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+ try:
+ response = client.create_direct_connect_gateway_association(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=params['virtual_gateway_id'])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to associate gateway')
+
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
+
+ result = response
+ return result
+
+
+def delete_association(client, module, gateway_id, virtual_gateway_id):
+ try:
+ response = client.delete_direct_connect_gateway_association(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=virtual_gateway_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete gateway association.")
+
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console')
+
+ result = response
+ return result
+
+
+def create_dx_gateway(client, module):
+ params = dict()
+ params['name'] = module.params.get('name')
+ params['amazon_asn'] = module.params.get('amazon_asn')
+ try:
+ response = client.create_direct_connect_gateway(
+ directConnectGatewayName=params['name'],
+ amazonSideAsn=int(params['amazon_asn']))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create direct connect gateway.")
+
+ result = response
+ return result
+
+
+def find_dx_gateway(client, module, gateway_id=None):
+ params = dict()
+ gateways = list()
+ if gateway_id is not None:
+ params['directConnectGatewayId'] = gateway_id
+ while True:
+ try:
+ resp = client.describe_direct_connect_gateways(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe gateways")
+ gateways.extend(resp['directConnectGateways'])
+ if 'nextToken' in resp:
+ params['nextToken'] = resp['nextToken']
+ else:
+ break
+ if gateways != []:
+ count = 0
+ for gateway in gateways:
+ if module.params.get('name') == gateway['directConnectGatewayName']:
+ count += 1
+ return gateway
+ return None
+
+
+def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
+ try:
+ if virtual_gateway_id is None:
+ resp = client.describe_direct_connect_gateway_associations(
+ directConnectGatewayId=gateway_id
+ )
+ else:
+ resp = client.describe_direct_connect_gateway_associations(
+ directConnectGatewayId=gateway_id,
+ virtualGatewayId=virtual_gateway_id,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check gateway association")
+ return resp
+
+
+def ensure_present(client, module):
+ # If an existing direct connect gateway matches our args
+ # then a match is considered to have been found and we will not create another dxgw.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['name'] = module.params.get('name')
+ params['amazon_asn'] = module.params.get('amazon_asn')
+ params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+
+ # check if a gateway matching our module args already exists
+ existing_dxgw = find_dx_gateway(client, module)
+
+ if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
+ gateway_id = existing_dxgw['directConnectGatewayId']
+ # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
+ if params['virtual_gateway_id']:
+ resp = check_dxgw_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=params['virtual_gateway_id'])
+ if not resp["directConnectGatewayAssociations"]:
+ # attach the dxgw to the supplied virtual_gateway_id
+ associate_direct_connect_gateway(client, module, gateway_id)
+ changed = True
+ # if params['virtual_gateway_id'] is not provided, check whether the dxgw is attached to a VPG; if so, detach it.
+ else:
+
+ resp = check_dxgw_association(client, module, gateway_id=gateway_id)
+ if resp["directConnectGatewayAssociations"]:
+ for association in resp['directConnectGatewayAssociations']:
+ if association['associationState'] not in ['disassociating', 'disassociated']:
+ delete_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ virtual_gateway_id=association['virtualGatewayId'])
+ else:
+ # create a new dxgw
+ new_dxgw = create_dx_gateway(client, module)
+ changed = True
+ gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
+
+ # if a virtual_gateway_id was supplied, attempt to attach it to the dxgw
+ if params['virtual_gateway_id']:
+ associate_direct_connect_gateway(client, module, gateway_id)
+ resp = check_dxgw_association(client,
+ module,
+ gateway_id=gateway_id
+ )
+ if resp["directConnectGatewayAssociations"]:
+ changed = True
+
+ result = dx_gateway_info(client, gateway_id, module)
+ return changed, result
+
+
+def ensure_absent(client, module):
+ # Find the direct connect gateway, remove any remaining associations,
+ # then delete the gateway itself.
+
+ changed = False
+ result = dict()
+ dx_gateway_id = module.params.get('direct_connect_gateway_id')
+ existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
+ if existing_dxgw is not None:
+ resp = check_dxgw_association(client, module,
+ gateway_id=dx_gateway_id)
+ if resp["directConnectGatewayAssociations"]:
+ for association in resp['directConnectGatewayAssociations']:
+ if association['associationState'] not in ['disassociating', 'disassociated']:
+ delete_association(client, module,
+ gateway_id=dx_gateway_id,
+ virtual_gateway_id=association['virtualGatewayId'])
+ # wait for deleting association
+ timeout = time.time() + module.params.get('wait_timeout')
+ while time.time() < timeout:
+ resp = check_dxgw_association(client,
+ module,
+ gateway_id=dx_gateway_id)
+ if resp["directConnectGatewayAssociations"] != []:
+ time.sleep(15)
+ else:
+ break
+
+ try:
+ resp = client.delete_direct_connect_gateway(
+ directConnectGatewayId=dx_gateway_id
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete gateway")
+ result = resp['directConnectGateway']
+ changed = True
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(),
+ amazon_asn=dict(),
+ virtual_gateway_id=dict(),
+ direct_connect_gateway_id=dict(),
+ wait_timeout=dict(type='int', default=320),
+ )
+ required_if = [('state', 'present', ['name', 'amazon_asn']),
+ ('state', 'absent', ['direct_connect_gateway_id'])]
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=required_if)
+
+ state = module.params.get('state')
+
+ try:
+ client = module.client('directconnect')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ (changed, results) = ensure_present(client, module)
+ elif state == 'absent':
+ changed = ensure_absent(client, module)
+ results = {}
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py
new file mode 100644
index 000000000..cc7122712
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: directconnect_link_aggregation_group
+version_added: 1.0.0
+short_description: Manage Direct Connect LAG bundles
+description:
+ - Create, delete, or modify a Direct Connect link aggregation group.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_link_aggregation_group).
+ The usage did not change.
+author:
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+options:
+ state:
+ description:
+ - The state of the Direct Connect link aggregation group.
+ choices:
+ - present
+ - absent
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the Direct Connect link aggregation group.
+ type: str
+ link_aggregation_group_id:
+ description:
+ - The ID of the Direct Connect link aggregation group.
+ type: str
+ num_connections:
+ description:
+ - The number of connections with which to initialize the link aggregation group.
+ type: int
+ min_links:
+ description:
+ - The minimum number of physical connections that must be operational for the LAG itself to be operational.
+ type: int
+ location:
+ description:
+ - The location of the link aggregation group.
+ type: str
+ bandwidth:
+ description:
+ - The bandwidth of the link aggregation group.
+ type: str
+ force_delete:
+ description:
+ - This allows the minimum number of links to be set to 0, any hosted connections disassociated,
+ and any virtual interfaces associated to the LAG deleted.
+ type: bool
+ default: false
+ connection_id:
+ description:
+ - A connection ID to link with the link aggregation group upon creation.
+ type: str
+ delete_with_disassociation:
+ description:
+ - To be used with I(state=absent) to delete connections after disassociating them with the LAG.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Whether or not to wait for the operation to complete.
+ - May be useful when waiting for virtual interfaces to be deleted.
+ - The time to wait can be controlled by setting I(wait_timeout).
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - The duration in seconds to wait if I(wait=true).
+ default: 120
+ type: int
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect link aggregation group
+- community.aws.directconnect_link_aggregation_group:
+ state: present
+ location: EqDC2
+ link_aggregation_group_id: dxlag-xxxxxxxx
+ bandwidth: 1Gbps
+"""
+
+RETURN = """
+changed:
+ type: bool
+ description: Whether or not the LAG has changed.
+ returned: always
+aws_device:
+ type: str
+ description: The AWS Direct Connection endpoint that hosts the LAG.
+ sample: "EqSe2-1bwfvazist2k0"
+ returned: when I(state=present)
+connections:
+ type: list
+ description: A list of connections bundled by this LAG.
+ sample:
+ "connections": [
+ {
+ "aws_device": "EqSe2-1bwfvazist2k0",
+ "bandwidth": "1Gbps",
+ "connection_id": "dxcon-fgzjah5a",
+ "connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
+ "connection_state": "down",
+ "lag_id": "dxlag-fgnsp4rq",
+ "location": "EqSe2",
+ "owner_account": "123456789012",
+ "region": "us-west-2"
+ }
+ ]
+ returned: when I(state=present)
+connections_bandwidth:
+ type: str
+ description: The individual bandwidth of the physical connections bundled by the LAG.
+ sample: "1Gbps"
+ returned: when I(state=present)
+lag_id:
+ type: str
+ description: Unique identifier for the link aggregation group.
+ sample: "dxlag-fgnsp4rq"
+ returned: when I(state=present)
+lag_name:
+ type: str
+ description: User-provided name for the link aggregation group.
+ returned: when I(state=present)
+lag_state:
+ type: str
+ description: State of the LAG.
+ sample: "pending"
+ returned: when I(state=present)
+location:
+ type: str
+ description: Where the connection is located.
+ sample: "EqSe2"
+ returned: when I(state=present)
+minimum_links:
+ type: int
+ description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
+ returned: when I(state=present)
+number_of_connections:
+ type: int
+ description: The number of physical connections bundled by the LAG.
+ returned: when I(state=present)
+owner_account:
+ type: str
+ description: Owner account ID of the LAG.
+ returned: when I(state=present)
+region:
+ type: str
+ description: The region in which the LAG exists.
+ returned: when I(state=present)
+"""
+
+import traceback
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag
+
+
+def lag_status(client, lag_id):
+ return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
+
+
+def lag_exists(client, lag_id=None, lag_name=None, verify=True):
+ """ If verify=True, returns the LAG ID or None
+ If verify=False, returns the LAG's data (or an empty dict)
+ """
+ try:
+ if lag_id:
+ response = client.describe_lags(lagId=lag_id)
+ else:
+ response = client.describe_lags()
+ except botocore.exceptions.ClientError as e:
+ if lag_id and verify:
+ return False
+ elif lag_id:
+ return {}
+ else:
+ failed_op = "Failed to describe DirectConnect link aggregation groups."
+ raise DirectConnectError(msg=failed_op,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ match = [] # List of LAG IDs that are exact matches
+ lag = [] # List of LAG data that are exact matches
+
+ # look for matching connections
+ if len(response.get('lags', [])) == 1 and lag_id:
+ if response['lags'][0]['lagState'] != 'deleted':
+ match.append(response['lags'][0]['lagId'])
+ lag.append(response['lags'][0])
+ else:
+ for each in response.get('lags', []):
+ if each['lagState'] != 'deleted':
+ if not lag_id:
+ if lag_name == each['lagName']:
+ match.append(each['lagId'])
+ else:
+ match.append(each['lagId'])
+
+ # verifying if the connections exists; if true, return connection identifier, otherwise return False
+ if verify and len(match) == 1:
+ return match[0]
+ elif verify:
+ return False
+
+ # not verifying if the connection exists; just return current connection info
+ else:
+ if len(lag) == 1:
+ return lag[0]
+ else:
+ return {}
+
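+# lag_exists() mirrors connection_exists() in directconnect_connection.py
+# (illustrative):
+#
+# lag_exists(client, lag_name='my-lag') # -> 'dxlag-...' or False
+# lag_exists(client, lag_id='dxlag-...', verify=False) # -> LAG dict or {}
+#
+# lag_status() above is the verify=False form.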
+
+def create_lag(client, num_connections, location, bandwidth, name, connection_id):
+ if not name:
+ raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
+ last_traceback=None,
+ exception="")
+
+ parameters = dict(numberOfConnections=num_connections,
+ location=location,
+ connectionsBandwidth=bandwidth,
+ lagName=name)
+ if connection_id:
+ parameters.update(connectionId=connection_id)
+ try:
+ lag = client.create_lag(**parameters)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+ return lag['lagId']
+
+
+def delete_lag(client, lag_id):
+ try:
+ client.delete_lag(lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
+def _update_lag(client, lag_id, lag_name, min_links):
+ params = {}
+ if min_links:
+ params.update(minimumLinks=min_links)
+ if lag_name:
+ params.update(lagName=lag_name)
+
+ client.update_lag(lagId=lag_id, **params)
+
+
+def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
+ start = time.time()
+
+ if min_links and min_links > num_connections:
+ raise DirectConnectError(
+ msg="The number of connections {0} must be greater than the minimum number of links "
+ "{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
+ last_traceback=None,
+ exception=None
+ )
+
+ while True:
+ try:
+ _update_lag(client, lag_id, lag_name, min_links)
+ except botocore.exceptions.ClientError as e:
+ if wait and time.time() - start <= wait_timeout:
+ continue
+ msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
+ if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
+ msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
+ raise DirectConnectError(msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ else:
+ break
+
+
+def lag_changed(current_status, name, min_links):
+ """ Determines if a modifiable link aggregation group attribute has been modified. """
+ return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
+
+
+def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
+ exists = lag_exists(client, lag_id, lag_name)
+ if not exists and lag_id:
+ raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
+ last_traceback=None,
+ exception="")
+
+ # the connection is found; get the latest state and see if it needs to be updated
+ if exists:
+ lag_id = exists
+ latest_state = lag_status(client, lag_id)
+ if lag_changed(latest_state, lag_name, min_links):
+ update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
+ return True, lag_id
+ return False, lag_id
+
+ # no connection found; create a new one
+ else:
+ lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
+ update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
+ return True, lag_id
+
+
+def describe_virtual_interfaces(client, lag_id):
+ try:
+ response = client.describe_virtual_interfaces(connectionId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+ return response.get('virtualInterfaces', [])
+
+
+def get_connections_and_virtual_interfaces(client, lag_id):
+ virtual_interfaces = describe_virtual_interfaces(client, lag_id)
+ connections = lag_status(client, lag_id=lag_id).get('connections', [])
+ return virtual_interfaces, connections
+
+
+def disassociate_vis(client, lag_id, virtual_interfaces):
+    for vi in virtual_interfaces:
+        try:
+            client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId'])
+        except botocore.exceptions.ClientError as e:
+            raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi['virtualInterfaceId'], lag_id),
+                                     last_traceback=traceback.format_exc(),
+                                     exception=e)
+
+
+def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
+ lag_id = lag_exists(client, lag_id, lag_name)
+ if not lag_id:
+ return False
+
+ latest_status = lag_status(client, lag_id)
+
+ # determine the associated connections and virtual interfaces to disassociate
+ virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
+
+ # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
+ if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
+ raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
+ "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
+ "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
+ "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
+ last_traceback=None,
+ exception=None)
+
+ # update min_links to be 0 so we can remove the LAG
+ update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
+
+ for connection in connections:
+ disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
+ if delete_with_disassociation:
+ delete_connection(client, connection['connectionId'])
+
+ for vi in virtual_interfaces:
+ delete_virtual_interface(client, vi['virtualInterfaceId'])
+
+    start_time = time.time()
+    while True:
+        try:
+            delete_lag(client, lag_id)
+        except DirectConnectError as e:
+            if wait and (time.time() - start_time < wait_timeout) and ('until its Virtual Interfaces are deleted' in str(e.exception)):
+                continue
+            raise
+        else:
+            return True
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(),
+ link_aggregation_group_id=dict(),
+ num_connections=dict(type='int'),
+ min_links=dict(type='int'),
+ location=dict(),
+ bandwidth=dict(),
+ connection_id=dict(),
+ delete_with_disassociation=dict(type='bool', default=False),
+ force_delete=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=120),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('link_aggregation_group_id', 'name')],
+ required_if=[('state', 'present', ('location', 'bandwidth'))],
+ )
+
+ try:
+ connection = module.client('directconnect')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ response = {}
+ try:
+ if state == 'present':
+ changed, lag_id = ensure_present(connection,
+ num_connections=module.params.get("num_connections"),
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ location=module.params.get("location"),
+ bandwidth=module.params.get("bandwidth"),
+ connection_id=module.params.get("connection_id"),
+ min_links=module.params.get("min_links"),
+ wait=module.params.get("wait"),
+ wait_timeout=module.params.get("wait_timeout"))
+ response = lag_status(connection, lag_id)
+ elif state == "absent":
+ changed = ensure_absent(connection,
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ force_delete=module.params.get("force_delete"),
+ delete_with_disassociation=module.params.get("delete_with_disassociation"),
+ wait=module.params.get('wait'),
+ wait_timeout=module.params.get('wait_timeout'))
+ except DirectConnectError as e:
+ if e.last_traceback:
+            module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py
new file mode 100644
index 000000000..059cd7425
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: directconnect_virtual_interface
+version_added: 1.0.0
+short_description: Manage Direct Connect virtual interfaces
+description:
+ - Create, delete, or modify a Direct Connect public or private virtual interface.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_virtual_interface).
+ The usage did not change.
+author:
+ - "Sloane Hertel (@s-hertel)"
+options:
+ state:
+ description:
+ - The desired state of the Direct Connect virtual interface.
+ choices: [present, absent]
+ type: str
+ required: true
+ id_to_associate:
+ description:
+ - The ID of the link aggregation group or connection to associate with the virtual interface.
+ aliases: [link_aggregation_group_id, connection_id]
+ type: str
+ required: true
+ public:
+ description:
+ - The type of virtual interface.
+ type: bool
+ name:
+ description:
+ - The name of the virtual interface.
+ type: str
+ vlan:
+ description:
+ - The VLAN ID.
+ default: 100
+ type: int
+ bgp_asn:
+ description:
+ - The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ default: 65000
+ type: int
+ authentication_key:
+ description:
+ - The authentication key for BGP configuration.
+ type: str
+ amazon_address:
+ description:
+      - The Amazon address CIDR with which to create the virtual interface.
+ type: str
+ customer_address:
+ description:
+ - The customer address CIDR with which to create the virtual interface.
+ type: str
+ address_type:
+ description:
+ - The type of IP address for the BGP peer.
+ type: str
+ cidr:
+ description:
+ - A list of route filter prefix CIDRs with which to create the public virtual interface.
+ type: list
+ elements: str
+ virtual_gateway_id:
+ description:
+ - The virtual gateway ID required for creating a private virtual interface.
+ - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required.
+ These options are mutually exclusive.
+ type: str
+ direct_connect_gateway_id:
+ description:
+ - The direct connect gateway ID for creating a private virtual interface.
+ - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required.
+ These options are mutually exclusive.
+ type: str
+ virtual_interface_id:
+ description:
+ - The virtual interface ID.
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+RETURN = r'''
+address_family:
+ description: The address family for the BGP peer.
+ returned: always
+ type: str
+ sample: ipv4
+amazon_address:
+ description: IP address assigned to the Amazon interface.
+ returned: always
+ type: str
+ sample: 169.254.255.1/30
+asn:
+ description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ returned: always
+ type: int
+ sample: 65000
+auth_key:
+ description: The authentication key for BGP configuration.
+ returned: always
+ type: str
+ sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
+bgp_peers:
+ description: A list of the BGP peers configured on this virtual interface.
+ returned: always
+ type: complex
+ contains:
+ address_family:
+ description: The address family for the BGP peer.
+ returned: always
+ type: str
+ sample: ipv4
+ amazon_address:
+ description: IP address assigned to the Amazon interface.
+ returned: always
+ type: str
+ sample: 169.254.255.1/30
+ asn:
+ description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ returned: always
+ type: int
+ sample: 65000
+ auth_key:
+ description: The authentication key for BGP configuration.
+ returned: always
+ type: str
+ sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
+ bgp_peer_state:
+      description: The state of the BGP peer (verifying, pending, available).
+ returned: always
+ type: str
+ sample: available
+ bgp_status:
+ description: The up/down state of the BGP peer.
+ returned: always
+ type: str
+ sample: up
+ customer_address:
+ description: IP address assigned to the customer interface.
+ returned: always
+ type: str
+ sample: 169.254.255.2/30
+changed:
+  description: Indicates whether the virtual interface has been created/modified/deleted.
+ returned: always
+ type: bool
+ sample: false
+connection_id:
+ description:
+ - The ID of the connection. This field is also used as the ID type for operations that
+ use multiple connection types (LAG, interconnect, and/or connection).
+ returned: always
+ type: str
+ sample: dxcon-fgb175av
+customer_address:
+ description: IP address assigned to the customer interface.
+ returned: always
+ type: str
+ sample: 169.254.255.2/30
+customer_router_config:
+ description: Information for generating the customer router configuration.
+ returned: always
+ type: str
+location:
+ description: Where the connection is located.
+ returned: always
+ type: str
+ sample: EqDC2
+owner_account:
+ description: The AWS account that will own the new virtual interface.
+ returned: always
+ type: str
+ sample: '123456789012'
+route_filter_prefixes:
+ description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
+ returned: always
+ type: complex
+ contains:
+ cidr:
+      description: A route to be advertised to the AWS network in this region.
+ returned: always
+ type: str
+ sample: 54.227.92.216/30
+virtual_gateway_id:
+ description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
+ returned: when I(public=False)
+ type: str
+ sample: vgw-f3ce259a
+direct_connect_gateway_id:
+ description: The ID of the Direct Connect gateway. This only applies to private virtual interfaces.
+ returned: when I(public=False)
+ type: str
+ sample: f7593767-eded-44e8-926d-a2234175835d
+virtual_interface_id:
+ description: The ID of the virtual interface.
+ returned: always
+ type: str
+ sample: dxvif-fh0w7cex
+virtual_interface_name:
+ description: The name of the virtual interface assigned by the customer.
+ returned: always
+ type: str
+ sample: test_virtual_interface
+virtual_interface_state:
+ description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
+ returned: always
+ type: str
+ sample: available
+virtual_interface_type:
+ description: The type of virtual interface (private, public).
+ returned: always
+ type: str
+ sample: private
+vlan:
+ description: The VLAN ID.
+ returned: always
+ type: int
+ sample: 100
+'''
+
+EXAMPLES = r'''
+---
+- name: create an association between a LAG and connection
+ community.aws.directconnect_virtual_interface:
+ state: present
+ name: "{{ name }}"
+ link_aggregation_group_id: LAG-XXXXXXXX
+ connection_id: dxcon-XXXXXXXX
+
+- name: remove an association between a connection and virtual interface
+ community.aws.directconnect_virtual_interface:
+ state: absent
+ connection_id: dxcon-XXXXXXXX
+ virtual_interface_id: dxv-XXXXXXXX
+
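+# A hedged illustrative sketch (all IDs and addresses are placeholders): per
+# this module's required_if rules, a public virtual interface also needs
+# amazon_address, customer_address, and cidr.
+- name: create a public virtual interface
+  community.aws.directconnect_virtual_interface:
+    state: present
+    id_to_associate: dxcon-XXXXXXXX
+    public: true
+    name: example-public-vi
+    vlan: 101
+    bgp_asn: 65000
+    amazon_address: 169.254.255.1/30
+    customer_address: 169.254.255.2/30
+    cidr:
+      - 198.51.100.0/30
+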
+'''
+
+import traceback
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
+from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def try_except_ClientError(failure_msg):
+ '''
+ Wrapper for boto3 calls that uses AWSRetry and handles exceptions
+ '''
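+    # Usage sketch, mirroring how this module applies the factory below: wrap a
+    # boto3 client method, then call it with that method's kwargs, e.g.
+    #   try_except_ClientError(failure_msg="...")(client.describe_virtual_interfaces)(**vi_params)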
+ def wrapper(f):
+ def run_func(*args, **kwargs):
+ try:
+ result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
+ except (ClientError, BotoCoreError) as e:
+ raise DirectConnectError(failure_msg, traceback.format_exc(), e)
+ return result
+ return run_func
+ return wrapper
+
+
+def find_unique_vi(client, connection_id, virtual_interface_id, name):
+ '''
+ Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
+ If multiple matches are found False is returned. If no matches are found None is returned.
+ '''
+
+ # Get the virtual interfaces, filtering by the ID if provided.
+ vi_params = {}
+ if virtual_interface_id:
+ vi_params = {'virtualInterfaceId': virtual_interface_id}
+
+ virtual_interfaces = try_except_ClientError(
+ failure_msg="Failed to describe virtual interface")(
+ client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
+
+ # Remove deleting/deleted matches from the results.
+ virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
+
+ matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
+ return exact_match(matching_virtual_interfaces)
+
+
+def exact_match(virtual_interfaces):
+ '''
+ Returns the virtual interface ID if one was found,
+ None if the virtual interface ID needs to be created,
+ False if an exact match was not found
+ '''
+
+ if not virtual_interfaces:
+ return None
+ if len(virtual_interfaces) == 1:
+ return virtual_interfaces[0]['virtualInterfaceId']
+ else:
+ return False
+
+
+def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
+ '''
+ Filters the available virtual interfaces to try to find a unique match
+ '''
+ # Filter by name if provided.
+ if name:
+ matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
+ if len(matching_by_name) == 1:
+ return matching_by_name
+ else:
+ matching_by_name = virtual_interfaces
+
+ # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated)
+ if connection_id and len(matching_by_name) > 1:
+ matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id)
+ if len(matching_by_connection_id) == 1:
+ return matching_by_connection_id
+ else:
+ matching_by_connection_id = matching_by_name
+
+ return matching_by_connection_id
+
+
+def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
+ '''
+ Return virtual interfaces that have the connection_id associated
+ '''
+ return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id]
+
+
+def find_virtual_interface_by_name(virtual_interfaces, name):
+ '''
+ Return virtual interfaces that match the provided name
+ '''
+ return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name]
+
+
+def vi_state(client, virtual_interface_id):
+ '''
+    Returns the described virtual interface, including its state.
+ '''
+ err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
+ vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id)
+ return vi['virtualInterfaces'][0]
+
+
+def assemble_params_for_creating_vi(params):
+ '''
+ Returns kwargs to use in the call to create the virtual interface
+
+ Params for public virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
+ Params for private virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
+ '''
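+    # For illustration (hypothetical values): a private virtual interface with a
+    # virtual gateway yields roughly
+    #   {'virtualInterfaceName': 'example-vi', 'vlan': 100, 'asn': 65000,
+    #    'virtualGatewayId': 'vgw-123456'}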
+
+ public = params['public']
+ name = params['name']
+ vlan = params['vlan']
+ bgp_asn = params['bgp_asn']
+ auth_key = params['authentication_key']
+ amazon_addr = params['amazon_address']
+ customer_addr = params['customer_address']
+ family_addr = params['address_type']
+ cidr = params['cidr']
+ virtual_gateway_id = params['virtual_gateway_id']
+ direct_connect_gateway_id = params['direct_connect_gateway_id']
+
+ parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn)
+ opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr)
+
+ for name, value in opt_params.items():
+ if value:
+ parameters[name] = value
+
+ # virtual interface type specific parameters
+ if public and cidr:
+ parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr]
+ if not public:
+ if virtual_gateway_id:
+ parameters['virtualGatewayId'] = virtual_gateway_id
+ elif direct_connect_gateway_id:
+ parameters['directConnectGatewayId'] = direct_connect_gateway_id
+
+ return parameters
+
+
+def create_vi(client, public, associated_id, creation_params):
+ '''
+ :param public: a boolean
+ :param associated_id: a link aggregation group ID or connection ID to associate
+ with the virtual interface.
+ :param creation_params: a dict of parameters to use in the AWS SDK call
+ :return The ID of the created virtual interface
+ '''
+ err_msg = "Failed to create virtual interface"
+ if public:
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id,
+ newPublicVirtualInterface=creation_params)
+ else:
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id,
+ newPrivateVirtualInterface=creation_params)
+ return vi['virtualInterfaceId']
+
+
+def modify_vi(client, virtual_interface_id, connection_id):
+ '''
+ Associate a new connection ID
+ '''
+ err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
+ try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id,
+ connectionId=connection_id)
+
+
+def needs_modification(client, virtual_interface_id, connection_id):
+ '''
+ Determine if the associated connection ID needs to be updated
+ '''
+ return vi_state(client, virtual_interface_id).get('connectionId') != connection_id
+
+
+def ensure_state(connection, module):
+ changed = False
+
+ state = module.params['state']
+ connection_id = module.params['id_to_associate']
+ public = module.params['public']
+ name = module.params['name']
+
+ virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)
+
+ if virtual_interface_id is False:
+ module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
+ "and connection_id options if applicable to find a unique match.")
+
+ if state == 'present':
+
+ if not virtual_interface_id and module.params['virtual_interface_id']:
+ module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
+
+ elif not virtual_interface_id:
+ assembled_params = assemble_params_for_creating_vi(module.params)
+ virtual_interface_id = create_vi(connection, public, connection_id, assembled_params)
+ changed = True
+
+ if needs_modification(connection, virtual_interface_id, connection_id):
+ modify_vi(connection, virtual_interface_id, connection_id)
+ changed = True
+
+ latest_state = vi_state(connection, virtual_interface_id)
+
+ else:
+ if virtual_interface_id:
+ delete_virtual_interface(connection, virtual_interface_id)
+ changed = True
+
+ latest_state = {}
+
+ return changed, latest_state
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
+ public=dict(type='bool'),
+ name=dict(),
+ vlan=dict(type='int', default=100),
+ bgp_asn=dict(type='int', default=65000),
+ authentication_key=dict(no_log=True),
+ amazon_address=dict(),
+ customer_address=dict(),
+ address_type=dict(),
+ cidr=dict(type='list', elements='str'),
+ virtual_gateway_id=dict(),
+ direct_connect_gateway_id=dict(),
+ virtual_interface_id=dict()
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_one_of=[['virtual_interface_id', 'name']],
+ required_if=[['state', 'present', ['public']],
+ ['public', True, ['amazon_address']],
+ ['public', True, ['customer_address']],
+ ['public', True, ['cidr']]],
+ mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']])
+
+ connection = module.client('directconnect')
+
+ try:
+ changed, latest_state = ensure_state(connection, module)
+ except DirectConnectError as e:
+ if e.exception:
+ module.fail_json_aws(exception=e.exception, msg=e.msg)
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/dms_endpoint.py b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
new file mode 100644
index 000000000..fb899d669
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
@@ -0,0 +1,698 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dms_endpoint
+version_added: 1.0.0
+short_description: Creates or destroys a data migration services endpoint
+description:
+  - Creates or destroys a data migration services endpoint
+    that can be used to replicate data.
+options:
+ state:
+ description:
+ - State of the endpoint.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ endpointidentifier:
+ description:
+ - An identifier name for the endpoint.
+ type: str
+ required: true
+ endpointtype:
+ description:
+ - Type of endpoint we want to manage.
+ - Required when I(state=present).
+ choices: ['source', 'target']
+ type: str
+ enginename:
+ description:
+      - Database engine that we want to use; please refer to
+        the AWS DMS documentation for more information on the supported
+        engines and their limitations.
+ - Required when I(state=present).
+ choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
+ 'redshift', 's3', 'db2', 'azuredb', 'sybase',
+ 'dynamodb', 'mongodb', 'sqlserver']
+ type: str
+ username:
+ description:
+ - Username our endpoint will use to connect to the database.
+ type: str
+ password:
+ description:
+      - Password used to connect to the database.
+        This attribute can only be written;
+        the AWS API does not return this parameter.
+ type: str
+ servername:
+ description:
+ - Servername that the endpoint will connect to.
+ type: str
+ port:
+ description:
+ - TCP port for access to the database.
+ type: int
+ databasename:
+ description:
+ - Name for the database on the origin or target side.
+ type: str
+ extraconnectionattributes:
+ description:
+      - Extra attributes for the database connection. The AWS documentation
+        states "For more information about extra connection attributes,
+        see the documentation section for your data store."
+ type: str
+ kmskeyid:
+ description:
+ - Encryption key to use to encrypt replication storage and
+ connection information.
+ type: str
+ tags:
+ description:
+      - A dictionary of tags to add to the endpoint.
+ type: dict
+ certificatearn:
+ description:
+ - Amazon Resource Name (ARN) for the certificate.
+ type: str
+ sslmode:
+ description:
+ - Mode used for the SSL connection.
+ default: none
+ choices: ['none', 'require', 'verify-ca', 'verify-full']
+ type: str
+ serviceaccessrolearn:
+ description:
+ - Amazon Resource Name (ARN) for the service access role that you
+ want to use to create the endpoint.
+ type: str
+ externaltabledefinition:
+ description:
+ - The external table definition.
+ type: str
+ dynamodbsettings:
+ description:
+ - Settings in JSON format for the target Amazon DynamoDB endpoint
+ if source or target is dynamodb.
+ type: dict
+ s3settings:
+ description:
+ - S3 buckets settings for the target Amazon S3 endpoint.
+ type: dict
+ dmstransfersettings:
+ description:
+ - The settings in JSON format for the DMS transfer type of
+ source endpoint.
+ type: dict
+ mongodbsettings:
+ description:
+ - Settings in JSON format for the source MongoDB endpoint.
+ type: dict
+ kinesissettings:
+ description:
+ - Settings in JSON format for the target Amazon Kinesis
+ Data Streams endpoint.
+ type: dict
+ elasticsearchsettings:
+ description:
+ - Settings in JSON format for the target Elasticsearch endpoint.
+ type: dict
+ wait:
+ description:
+ - Whether Ansible should wait for the object to be deleted when I(state=absent).
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Time in seconds we should wait for when deleting a resource.
+ - Required when I(wait=true).
+ type: int
+ retries:
+ description:
+      - Number of times we should retry when deleting a resource.
+      - Required when I(wait=true).
+ type: int
+author:
+ - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details
+- name: Endpoint Creation
+ community.aws.dms_endpoint:
+    state: present
+ endpointidentifier: 'testsource'
+ endpointtype: source
+ enginename: aurora
+ username: testing1
+ password: testint1234
+ servername: testing.domain.com
+ port: 3306
+ databasename: 'testdb'
+ sslmode: none
+ wait: false
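+
+# A hedged companion sketch: per this module's required_if rules, deleting an
+# endpoint requires I(wait), and I(wait=true) in turn requires I(timeout) and
+# I(retries).
+- name: Endpoint Deletion
+  community.aws.dms_endpoint:
+    state: absent
+    endpointidentifier: 'testsource'
+    wait: true
+    timeout: 60
+    retries: 10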
+'''
+
+RETURN = '''
+endpoint:
+ description:
+ - A description of the DMS endpoint.
+ returned: success
+ type: dict
+ contains:
+ database_name:
+ description:
+ - The name of the database at the endpoint.
+ type: str
+ returned: success
+ example: "exampledb"
+ endpoint_arn:
+ description:
+ - The ARN that uniquely identifies the endpoint.
+ type: str
+ returned: success
+ example: "arn:aws:dms:us-east-1:123456789012:endpoint:1234556789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"
+ endpoint_identifier:
+ description:
+ - The database endpoint identifier.
+ type: str
+ returned: success
+ example: "ansible-test-12345678-dms"
+ endpoint_type:
+ description:
+ - The type of endpoint. Valid values are C(SOURCE) and C(TARGET).
+ type: str
+ returned: success
+ example: "SOURCE"
+ engine_display_name:
+ description:
+ - The expanded name for the engine name.
+ type: str
+ returned: success
+ example: "Amazon Aurora MySQL"
+ engine_name:
+ description:
+ - The database engine name.
+ type: str
+ returned: success
+ example: "aurora"
+ kms_key_id:
+ description:
+        - A KMS key ID that is used to encrypt the connection parameters for the endpoint.
+ type: str
+ returned: success
+ example: "arn:aws:kms:us-east-1:123456789012:key/01234567-abcd-12ab-98fe-123456789abc"
+ port:
+ description:
+ - The port used to access the endpoint.
+      type: int
+ returned: success
+ example: 3306
+ server_name:
+ description:
+ - The name of the server at the endpoint.
+ type: str
+ returned: success
+ example: "ansible-test-123456789.example.com"
+ ssl_mode:
+ description:
+ - The SSL mode used to connect to the endpoint.
+ type: str
+ returned: success
+ example: "none"
+ tags:
+ description:
+ - A dictionary representing the tags attached to the endpoint.
+ type: dict
+ returned: success
+ example: {"MyTagKey": "MyTagValue"}
+ username:
+ description:
+ - The user name used to connect to the endpoint.
+ type: str
+ returned: success
+ example: "example-username"
+ dms_transfer_settings:
+ description:
+ - Additional transfer related settings.
+ type: dict
+ returned: when additional DMS Transfer settings have been configured.
+ s3_settings:
+ description:
+ - Additional settings for S3 endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(s3)
+ mongo_db_settings:
+ description:
+ - Additional settings for MongoDB endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(mongodb)
+ kinesis_settings:
+ description:
+ - Additional settings for Kinesis endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(kinesis)
+ kafka_settings:
+ description:
+ - Additional settings for Kafka endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(kafka)
+ elasticsearch_settings:
+ description:
+ - Additional settings for Elasticsearch endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(elasticsearch)
+ neptune_settings:
+ description:
+ - Additional settings for Amazon Neptune endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(neptune)
+ redshift_settings:
+ description:
+ - Additional settings for Redshift endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(redshift)
+ postgre_sql_settings:
+ description:
+      - Additional settings for PostgreSQL endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(postgres)
+ my_sql_settings:
+ description:
+ - Additional settings for MySQL endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(mysql)
+ oracle_settings:
+ description:
+ - Additional settings for Oracle endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(oracle)
+ sybase_settings:
+ description:
+ - Additional settings for Sybase endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(sybase)
+ microsoft_sql_server_settings:
+ description:
+ - Additional settings for Microsoft SQL Server endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(sqlserver)
+ i_b_m_db_settings:
+ description:
+ - Additional settings for IBM DB2 endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(db2)
+ doc_db_settings:
+ description:
+ - Additional settings for DocumentDB endpoints.
+ type: dict
+ returned: when the I(endpoint_type) is C(documentdb)
+ redis_settings:
+ description:
+ - Additional settings for Redis endpoints.
+ type: dict
+      returned: when the I(endpoint_type) is C(redis)
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+backoff_params = dict(retries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_describe_tags(connection, **params):
+ """ checks if the endpoint exists """
+ tags = connection.list_tags_for_resource(**params).get('TagList', [])
+ return boto3_tag_list_to_ansible_dict(tags)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_describe_endpoints(connection, **params):
+ try:
+ endpoints = connection.describe_endpoints(**params)
+ except is_boto3_error_code('ResourceNotFoundFault'):
+ return None
+ return endpoints.get('Endpoints', None)
+
+
+def describe_endpoint(connection, endpoint_identifier):
+ """ checks if the endpoint exists """
+ endpoint_filter = dict(Name='endpoint-id',
+ Values=[endpoint_identifier])
+ try:
+ endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe the DMS endpoint.")
+
+ if not endpoints:
+ return None
+
+ endpoint = endpoints[0]
+ try:
+ tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn'])
+ endpoint['tags'] = tags
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags")
+ return endpoint
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_delete_endpoint(client, **params):
+    """ deletes the DMS endpoint based on the EndpointArn; any waiting is
+    handled by delete_dms_endpoint() """
+    return client.delete_endpoint(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_create_endpoint(client, **params):
+ """ creates the DMS endpoint"""
+ return client.create_endpoint(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_modify_endpoint(client, **params):
+ """ updates the endpoint"""
+ return client.modify_endpoint(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def get_endpoint_deleted_waiter(client):
+ return client.get_waiter('endpoint_deleted')
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_remove_tags(client, **params):
+ return client.remove_tags_from_resource(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_add_tags(client, **params):
+ return client.add_tags_to_resource(**params)
+
+
+def endpoint_exists(endpoint):
+ """ Returns boolean based on the existence of the endpoint
+ :param endpoint: dict containing the described endpoint
+ :return: bool
+ """
+ return bool(len(endpoint['Endpoints']))
+
+
+def delete_dms_endpoint(connection, endpoint_arn):
+ try:
+ delete_arn = dict(
+ EndpointArn=endpoint_arn
+ )
+ if module.params.get('wait'):
+
+ delete_output = connection.delete_endpoint(**delete_arn)
+ delete_waiter = get_endpoint_deleted_waiter(connection)
+ delete_waiter.wait(
+ Filters=[{
+ 'Name': 'endpoint-arn',
+ 'Values': [endpoint_arn]
+
+ }],
+ WaiterConfig={
+ 'Delay': module.params.get('timeout'),
+ 'MaxAttempts': module.params.get('retries')
+ }
+ )
+ return delete_output
+ else:
+ return connection.delete_endpoint(**delete_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete the DMS endpoint.")
+
+
+def create_module_params():
+ """
+ Reads the module parameters and returns a dict
+ :return: dict
+ """
+ endpoint_parameters = dict(
+ EndpointIdentifier=module.params.get('endpointidentifier'),
+ EndpointType=module.params.get('endpointtype'),
+ EngineName=module.params.get('enginename'),
+ Username=module.params.get('username'),
+ Password=module.params.get('password'),
+ ServerName=module.params.get('servername'),
+ Port=module.params.get('port'),
+ DatabaseName=module.params.get('databasename'),
+ SslMode=module.params.get('sslmode')
+ )
+ if module.params.get('EndpointArn'):
+ endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
+ if module.params.get('certificatearn'):
+ endpoint_parameters['CertificateArn'] = \
+ module.params.get('certificatearn')
+
+ if module.params.get('dmstransfersettings'):
+ endpoint_parameters['DmsTransferSettings'] = \
+ module.params.get('dmstransfersettings')
+
+ if module.params.get('extraconnectionattributes'):
+ endpoint_parameters['ExtraConnectionAttributes'] =\
+ module.params.get('extraconnectionattributes')
+
+ if module.params.get('kmskeyid'):
+ endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
+
+ if module.params.get('tags'):
+ endpoint_parameters['Tags'] = module.params.get('tags')
+
+ if module.params.get('serviceaccessrolearn'):
+ endpoint_parameters['ServiceAccessRoleArn'] = \
+ module.params.get('serviceaccessrolearn')
+
+ if module.params.get('externaltabledefinition'):
+ endpoint_parameters['ExternalTableDefinition'] = \
+ module.params.get('externaltabledefinition')
+
+ if module.params.get('dynamodbsettings'):
+ endpoint_parameters['DynamoDbSettings'] = \
+ module.params.get('dynamodbsettings')
+
+ if module.params.get('s3settings'):
+ endpoint_parameters['S3Settings'] = module.params.get('s3settings')
+
+ if module.params.get('mongodbsettings'):
+ endpoint_parameters['MongoDbSettings'] = \
+ module.params.get('mongodbsettings')
+
+ if module.params.get('kinesissettings'):
+ endpoint_parameters['KinesisSettings'] = \
+ module.params.get('kinesissettings')
+
+ if module.params.get('elasticsearchsettings'):
+ endpoint_parameters['ElasticsearchSettings'] = \
+ module.params.get('elasticsearchsettings')
+
+    # wait, timeout and retries are module options, not API parameters, and are
+    # deliberately not added to the endpoint parameters.
+
+ return endpoint_parameters
+
+
+def compare_params(param_described):
+    """
+    Compares the dict obtained from describing the DMS endpoint with the
+    values built from the module parameters. The password can never be
+    compared: for security reasons boto3's describe method does not return it.
+    """
+    param_described = dict(param_described)
+    modparams = create_module_params()
+    # modify can't update tags
+    param_described.pop('Tags', None)
+    modparams.pop('Tags', None)
+    changed = False
+    for paramname in modparams:
+        # the describe call never returns the password, so it cannot be compared
+        if paramname == 'Password':
+            continue
+        if paramname in param_described and (
+                param_described[paramname] == modparams[paramname]
+                or str(param_described[paramname]).lower() == modparams[paramname]):
+            continue
+        changed = True
+    return changed
+
+
+def modify_dms_endpoint(connection, endpoint):
+ arn = endpoint['EndpointArn']
+ try:
+ params = create_module_params()
+ # modify can't update tags
+ params.pop('Tags', None)
+ return dms_modify_endpoint(connection, EndpointArn=arn, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params)
+
+
+def ensure_tags(connection, endpoint):
+ desired_tags = module.params.get('tags', None)
+ if desired_tags is None:
+ return False
+
+ current_tags = endpoint.get('tags', {})
+
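+    # compare_aws_tags returns the tags to set and the tag keys to remove; for
+    # example (illustrative values) current={'a': '1'}, desired={'b': '2'} with
+    # purge_tags=True gives roughly ({'b': '2'}, ['a']).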
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags,
+ module.params.get('purge_tags'))
+
+ if not tags_to_remove and not tags_to_add:
+ return False
+
+ if module.check_mode:
+ return True
+
+ arn = endpoint.get('EndpointArn')
+
+ try:
+ if tags_to_remove:
+ dms_remove_tags(connection, ResourceArn=arn, TagKeys=tags_to_remove)
+ if tags_to_add:
+ tag_list = ansible_dict_to_boto3_tag_list(tags_to_add)
+ dms_add_tags(connection, ResourceArn=arn, Tags=tag_list)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update DMS endpoint tags.")
+
+ return True
+
+
+def create_dms_endpoint(connection):
+ """
+ Function to create the dms endpoint
+ :param connection: boto3 aws connection
+ :return: information about the dms endpoint object
+ """
+
+ try:
+ params = create_module_params()
+ return dms_create_endpoint(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create DMS endpoint.")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ endpointidentifier=dict(required=True),
+ endpointtype=dict(choices=['source', 'target']),
+ enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
+ 'aurora', 'redshift', 's3', 'db2', 'azuredb',
+ 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
+ required=False),
+ username=dict(),
+ password=dict(no_log=True),
+ servername=dict(),
+ port=dict(type='int'),
+ databasename=dict(),
+ extraconnectionattributes=dict(),
+ kmskeyid=dict(no_log=False),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ certificatearn=dict(),
+ sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
+ default='none'),
+ serviceaccessrolearn=dict(),
+ externaltabledefinition=dict(),
+ dynamodbsettings=dict(type='dict'),
+ s3settings=dict(type='dict'),
+ dmstransfersettings=dict(type='dict'),
+ mongodbsettings=dict(type='dict'),
+ kinesissettings=dict(type='dict'),
+ elasticsearchsettings=dict(type='dict'),
+ wait=dict(type='bool', default=False),
+ timeout=dict(type='int'),
+ retries=dict(type='int')
+ )
+ global module
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ["state", "present", ["endpointtype"]],
+ ["state", "present", ["enginename"]],
+ ["state", "absent", ["wait"]],
+ ["wait", "True", ["timeout"]],
+ ["wait", "True", ["retries"]],
+ ],
+ supports_check_mode=False
+ )
+ exit_message = None
+ changed = False
+
+ state = module.params.get('state')
+
+ dmsclient = module.client('dms')
+ endpoint = describe_endpoint(dmsclient,
+ module.params.get('endpointidentifier'))
+ if state == 'present':
+ if endpoint:
+ changed |= ensure_tags(dmsclient, endpoint)
+ params_changed = compare_params(endpoint)
+ if params_changed:
+ updated_dms = modify_dms_endpoint(dmsclient, endpoint)
+ exit_message = updated_dms
+ endpoint = exit_message.get('Endpoint')
+ changed = True
+ else:
+ exit_message = "Endpoint Already Exists"
+ else:
+ exit_message = create_dms_endpoint(dmsclient)
+ endpoint = exit_message.get('Endpoint')
+ changed = True
+
+ if changed:
+ # modify and create don't return tags
+ tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn'])
+ endpoint['tags'] = tags
+ elif state == 'absent':
+ if endpoint:
+ delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn'])
+ exit_message = delete_results
+ endpoint = None
+ changed = True
+ else:
+ changed = False
+ exit_message = 'DMS Endpoint does not exist'
+
+ endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags'])
+ module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
new file mode 100644
index 000000000..fb5d59613
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dms_replication_subnet_group
+version_added: 1.0.0
+short_description: Creates or destroys a data migration services subnet group
+description:
+ - Creates or destroys a data migration services subnet group.
+options:
+ state:
+ description:
+ - State of the subnet group.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ identifier:
+ description:
+ - The name for the replication subnet group.
+ This value is stored as a lowercase string.
+ Must contain no more than 255 alphanumeric characters,
+ periods, spaces, underscores, or hyphens. Must not be "default".
+ type: str
+ required: true
+ description:
+ description:
+ - The description for the subnet group.
+ type: str
+ required: true
+ subnet_ids:
+ description:
+ - A list containing the subnet ids for the replication subnet group,
+ needs to be at least 2 items in the list.
+ type: list
+ elements: str
+ required: true
+author:
+ - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- community.aws.dms_replication_subnet_group:
+ state: present
+ identifier: "dev-sngroup"
+ description: "Development Subnet Group asdasdas"
+ subnet_ids: ['subnet-id1','subnet-id2']
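+
+# A hedged companion example: identifier, description, and subnet_ids are
+# required options regardless of state, including for removal.
+- community.aws.dms_replication_subnet_group:
+    state: absent
+    identifier: "dev-sngroup"
+    description: "Development Subnet Group"
+    subnet_ids: ['subnet-id1','subnet-id2']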
+'''
+
+RETURN = ''' # '''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+backoff_params = dict(retries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_subnet_group(connection, subnet_group):
+ """checks if instance exists"""
+ try:
+ subnet_group_filter = dict(Name='replication-subnet-group-id',
+ Values=[subnet_group])
+ return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter])
+ except botocore.exceptions.ClientError:
+ return {'ReplicationSubnetGroups': []}
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def replication_subnet_group_create(connection, **params):
+ """ creates the replication subnet group """
+ return connection.create_replication_subnet_group(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def replication_subnet_group_modify(connection, **modify_params):
+ return connection.modify_replication_subnet_group(**modify_params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def replication_subnet_group_delete(module, connection):
+ subnetid = module.params.get('identifier')
+ delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid)
+ return connection.delete_replication_subnet_group(**delete_parameters)
+
+
+def replication_subnet_exists(subnet):
+ """ Returns boolean based on the existence of the endpoint
+ :param endpoint: dict containing the described endpoint
+ :return: bool
+ """
+ return bool(len(subnet['ReplicationSubnetGroups']))
+
+
+def create_module_params(module):
+ """
+ Reads the module parameters and returns a dict
+ :return: dict
+ """
+ instance_parameters = dict(
+ # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
+ ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
+ ReplicationSubnetGroupDescription=module.params.get('description'),
+ SubnetIds=module.params.get('subnet_ids'),
+ )
+
+ return instance_parameters
+
+
+def compare_params(module, param_described):
+    """
+    Compares the dict obtained from the describe function with
+    the values built from the module parameters.
+    """
+    modparams = create_module_params(module)
+    changed = False
+    # need to sanitize values that get returned from the API
+    param_described.pop('VpcId', None)
+    param_described.pop('SubnetGroupStatus', None)
+    for paramname in modparams:
+        if paramname == 'SubnetIds':
+            # the API returns the subnets under 'Subnets', not 'SubnetIds'
+            subnets = [subnet.get('SubnetIdentifier') for subnet in param_described.get('Subnets', [])]
+            for modulesubnet in modparams['SubnetIds']:
+                if modulesubnet not in subnets:
+                    changed = True
+        elif param_described.get(paramname) != modparams[paramname]:
+            changed = True
+    return changed
+
+
+def create_replication_subnet_group(module, connection):
+ try:
+ params = create_module_params(module)
+ return replication_subnet_group_create(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create DMS replication subnet group.")
+
+
+def modify_replication_subnet_group(module, connection):
+ try:
+ modify_params = create_module_params(module)
+ return replication_subnet_group_modify(connection, **modify_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to Modify the DMS replication subnet group.")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ identifier=dict(type='str', required=True),
+ description=dict(type='str', required=True),
+ subnet_ids=dict(type='list', elements='str', required=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ exit_message = None
+ changed = False
+
+ state = module.params.get('state')
+ dmsclient = module.client('dms')
+ subnet_group = describe_subnet_group(dmsclient,
+ module.params.get('identifier'))
+ if state == 'present':
+ if replication_subnet_exists(subnet_group):
+ if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
+ if not module.check_mode:
+ exit_message = modify_replication_subnet_group(module, dmsclient)
+ else:
+                    exit_message = "Check mode enabled; the subnet group would have been modified"
+ changed = True
+ else:
+ exit_message = "No changes to Subnet group"
+ else:
+ if not module.check_mode:
+ exit_message = create_replication_subnet_group(module, dmsclient)
+ changed = True
+ else:
+ exit_message = "Check mode enabled"
+
+ elif state == 'absent':
+ if replication_subnet_exists(subnet_group):
+ if not module.check_mode:
+ replication_subnet_group_delete(module, dmsclient)
+ changed = True
+ exit_message = "Replication subnet group Deleted"
+ else:
+                exit_message = "Check mode enabled; the replication subnet group would have been deleted"
+ changed = True
+
+ else:
+ changed = False
+ exit_message = "Replication subnet group does not exist"
+
+ module.exit_json(changed=changed, msg=exit_message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_table.py b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
new file mode 100644
index 000000000..28d334fc9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
@@ -0,0 +1,1087 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dynamodb_table
+version_added: 1.0.0
+short_description: Create, update or delete AWS Dynamo DB tables
+description:
+ - Create or delete AWS Dynamo DB tables.
+ - Can update the provisioned throughput on existing tables.
+ - Returns the status of the specified table.
+author:
+ - Alan Loi (@loia)
+options:
+ state:
+ description:
+ - Create or delete the table.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the table.
+ required: true
+ type: str
+ hash_key_name:
+ description:
+ - Name of the hash key.
+ - Required when I(state=present) and table doesn't exist.
+ type: str
+ hash_key_type:
+ description:
+ - Type of the hash key.
+ - Defaults to C('STRING') when creating a new table.
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ type: str
+ range_key_name:
+ description:
+ - Name of the range key.
+ type: str
+ range_key_type:
+ description:
+ - Type of the range key.
+ - Defaults to C('STRING') when creating a new range key.
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ type: str
+ billing_mode:
+ description:
+      - Controls whether provisioned or on-demand tables are created.
+ choices: ['PROVISIONED', 'PAY_PER_REQUEST']
+ type: str
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision.
+ - Defaults to C(1) when creating a new table.
+ type: int
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision.
+ - Defaults to C(1) when creating a new table.
+ type: int
+ indexes:
+ description:
+      - List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes don't support updates or have throughput.
+ - "required options: ['name', 'type', 'hash_key_name']"
+ - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
+ suboptions:
+ name:
+ description: The name of the index.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of index.
+ type: str
+ required: true
+ choices: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
+ hash_key_name:
+ description:
+ - The name of the hash-based key.
+ - Required if index doesn't already exist.
+ - Can not be modified once the index has been created.
+ required: false
+ type: str
+ hash_key_type:
+ description:
+ - The type of the hash-based key.
+ - Defaults to C('STRING') when creating a new index.
+ - Can not be modified once the index has been created.
+ type: str
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ range_key_name:
+ description:
+ - The name of the range-based key.
+ - Can not be modified once the index has been created.
+ type: str
+ range_key_type:
+ type: str
+ description:
+ - The type of the range-based key.
+ - Defaults to C('STRING') when creating a new index.
+ - Can not be modified once the index has been created.
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ includes:
+ type: list
+ description: A list of fields to include when using C(global_include) or C(include) indexes.
+ elements: str
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision for the index.
+ type: int
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision for the index.
+ type: int
+ default: []
+ type: list
+ elements: dict
+ table_class:
+ description:
+ - The class of the table.
+ - Requires at least botocore version 1.23.18.
+ choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS']
+ type: str
+ version_added: 3.1.0
+ wait_timeout:
+ description:
+ - How long (in seconds) to wait for creation / update / deletion to complete.
+ aliases: ['wait_for_active_timeout']
+ default: 300
+ type: int
+ wait:
+ description:
+ - When I(wait=True) the module will wait for up to I(wait_timeout) seconds
+ for table creation or deletion to complete before returning.
+    default: true
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+- name: Create dynamo table with hash and range primary key
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ range_key_name: create_time
+ range_key_type: NUMBER
+ read_capacity: 2
+ write_capacity: 2
+ tags:
+ tag_name: tag_value
+
+- name: Update capacity on existing dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ read_capacity: 10
+ write_capacity: 10
+
+- name: Create pay-per-request table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ billing_mode: PAY_PER_REQUEST
+
+- name: Set index on existing dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ indexes:
+ - name: NamedIndex
+ type: global_include
+ hash_key_name: id
+ range_key_name: create_time
+ includes:
+ - other_field
+ - other_field2
+ read_capacity: 10
+ write_capacity: 10
+
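+# An extra illustrative example (the table name and key values are assumptions,
+# not from the original docs); table_class requires botocore >= 1.23.18.
+- name: Create table with the infrequent access table class
+ community.aws.dynamodb_table:
+ name: my-archive-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ table_class: STANDARD_INFREQUENT_ACCESS
+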
+- name: Delete dynamo table
+ community.aws.dynamodb_table:
+ name: my-table
+ region: us-east-1
+ state: absent
+'''
+
+RETURN = r'''
+table:
+ description: The returned table params from the describe API call.
+ returned: success
+ type: complex
+ contains: {}
+ sample: {
+ "arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table",
+ "attribute_definitions": [
+ {
+ "attribute_name": "id",
+ "attribute_type": "N"
+ }
+ ],
+ "billing_mode": "PROVISIONED",
+ "creation_date_time": "2022-02-04T13:36:01.578000+00:00",
+ "id": "533b45fe-0870-4b66-9b00-d2afcfe96f19",
+ "item_count": 0,
+ "key_schema": [
+ {
+ "attribute_name": "id",
+ "key_type": "HASH"
+ }
+ ],
+ "name": "ansible-test-14482047-alinas-mbp",
+ "provisioned_throughput": {
+ "number_of_decreases_today": 0,
+ "read_capacity_units": 1,
+ "write_capacity_units": 1
+ },
+ "size": 0,
+ "status": "ACTIVE",
+ "table_arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table",
+ "table_id": "533b45fe-0870-4b66-9b00-d2afcfe96f19",
+ "table_name": "ansible-test-table",
+ "table_size_bytes": 0,
+ "table_status": "ACTIVE",
+ "tags": {}
+ }
+table_status:
+ description: The current status of the table.
+ returned: success
+ type: str
+ sample: ACTIVE
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+DYNAMO_TYPE_DEFAULT = 'STRING'
+INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
+INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
+INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
+# Map in both directions
+DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'}
+DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items())
+KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys())
+
+
+# If you try to update an index while another index is updating, it throws
+# LimitExceededException/ResourceInUseException exceptions at you. This can be
+# pretty slow, so add plenty of retries...
+@AWSRetry.jittered_backoff(
+ retries=45, delay=5, max_delay=30,
+ catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'],
+)
+def _update_table_with_long_retry(**changes):
+ return client.update_table(
+ TableName=module.params.get('name'),
+ **changes
+ )
+
+
+# ResourceNotFoundException is expected here if the table doesn't exist
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'])
+def _describe_table(**params):
+ return client.describe_table(**params)
+
+
+def wait_exists():
+ table_name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
+ delay = min(wait_timeout, 5)
+ max_attempts = wait_timeout // delay
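+ # e.g. the default wait_timeout of 300 gives delay=5 and max_attempts=60,
+ # polling every 5 seconds for up to 5 minutes (illustrative arithmetic)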
+
+ try:
+ waiter = client.get_waiter('table_exists')
+ waiter.wait(
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
+ TableName=table_name,
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on table creation')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed while waiting on table creation')
+
+
+def wait_not_exists():
+ table_name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
+ delay = min(wait_timeout, 5)
+ max_attempts = wait_timeout // delay
+
+ try:
+ waiter = client.get_waiter('table_not_exists')
+ waiter.wait(
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
+ TableName=table_name,
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on table deletion')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed while waiting on table deletion')
+
+
+def _short_type_to_long(short_key):
+ if not short_key:
+ return None
+ return DYNAMO_TYPE_MAP_SHORT.get(short_key, None)
+
+
+def _long_type_to_short(long_key):
+ if not long_key:
+ return None
+ return DYNAMO_TYPE_MAP_LONG.get(long_key, None)
+
+
+def _schema_dict(key_name, key_type):
+ return dict(
+ AttributeName=key_name,
+ KeyType=key_type,
+ )
+
+
+def _merge_index_params(index, current_index):
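+ # Overlay the user-supplied index parameters on top of the existing index
+ # definition: keys present in the requested index win, while anything the
+ # user left unspecified keeps its current value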
+ idx = dict(current_index)
+ idx.update(index)
+ return idx
+
+
+def _decode_primary_index(current_table):
+ """
+ Decodes the primary index info from the current table definition
+ splitting it up into the keys we use as parameters
+ """
+ # The schema/attribute definitions are a list of dicts which need the same
+ # treatment as boto3's tag lists
+ schema = boto3_tag_list_to_ansible_dict(
+ current_table.get('key_schema', []),
+ # Map from 'HASH'/'RANGE' to attribute name
+ tag_name_key_name='key_type',
+ tag_value_key_name='attribute_name',
+ )
+ attributes = boto3_tag_list_to_ansible_dict(
+ current_table.get('attribute_definitions', []),
+ # Map from attribute name to 'S'/'N'/'B'.
+ tag_name_key_name='attribute_name',
+ tag_value_key_name='attribute_type',
+ )
+
+ hash_key_name = schema.get('HASH')
+ hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None))
+ range_key_name = schema.get('RANGE', None)
+ range_key_type = _short_type_to_long(attributes.get(range_key_name, None))
+
+ return dict(
+ hash_key_name=hash_key_name,
+ hash_key_type=hash_key_type,
+ range_key_name=range_key_name,
+ range_key_type=range_key_type,
+ )
+
+
+def _decode_index(index_data, attributes, type_prefix=''):
+ try:
+ index_map = dict(
+ name=index_data['index_name'],
+ )
+
+ index_data = dict(index_data)
+ index_data['attribute_definitions'] = attributes
+
+ index_map.update(_decode_primary_index(index_data))
+
+ throughput = index_data.get('provisioned_throughput', {})
+ index_map['provisioned_throughput'] = throughput
+ if throughput:
+ index_map['read_capacity'] = throughput.get('read_capacity_units')
+ index_map['write_capacity'] = throughput.get('write_capacity_units')
+
+ projection = index_data.get('projection', {})
+ if projection:
+ index_map['type'] = type_prefix + projection.get('projection_type')
+ index_map['includes'] = projection.get('non_key_attributes', [])
+
+ return index_map
+ except Exception as e:
+ module.fail_json_aws(e, msg='Decode failure', index_data=index_data)
+
+
+def compatibility_results(current_table):
+ if not current_table:
+ return dict()
+
+ billing_mode = current_table.get('billing_mode')
+
+ primary_indexes = _decode_primary_index(current_table)
+
+ hash_key_name = primary_indexes.get('hash_key_name')
+ hash_key_type = primary_indexes.get('hash_key_type')
+ range_key_name = primary_indexes.get('range_key_name')
+ range_key_type = primary_indexes.get('range_key_type')
+
+ indexes = list()
+ global_indexes = current_table.get('_global_index_map', {})
+ local_indexes = current_table.get('_local_index_map', {})
+ for index in global_indexes:
+ idx = dict(global_indexes[index])
+ idx.pop('provisioned_throughput', None)
+ indexes.append(idx)
+ for index in local_indexes:
+ idx = dict(local_indexes[index])
+ idx.pop('provisioned_throughput', None)
+ indexes.append(idx)
+
+ compat_results = dict(
+ hash_key_name=hash_key_name,
+ hash_key_type=hash_key_type,
+ range_key_name=range_key_name,
+ range_key_type=range_key_type,
+ indexes=indexes,
+ billing_mode=billing_mode,
+ region=module.region,
+ table_name=current_table.get('table_name', None),
+ table_class=current_table.get('table_class_summary', {}).get('table_class', None),
+ table_status=current_table.get('table_status', None),
+ tags=current_table.get('tags', {}),
+ )
+
+ if billing_mode == "PROVISIONED":
+ throughput = current_table.get('provisioned_throughput', {})
+ compat_results['read_capacity'] = throughput.get('read_capacity_units', None)
+ compat_results['write_capacity'] = throughput.get('write_capacity_units', None)
+
+ return compat_results
+
+
+def get_dynamodb_table():
+ table_name = module.params.get('name')
+ try:
+ table = _describe_table(TableName=table_name)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe table')
+
+ table = table['Table']
+ try:
+ tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags']
+ except is_boto3_error_code('AccessDeniedException'):
+ module.warn('Permission denied when listing tags')
+ tags = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to list table tags')
+
+ tags = boto3_tag_list_to_ansible_dict(tags)
+
+ table = camel_dict_to_snake_dict(table)
+
+ # Put some of the values into places people will expect them
+ table['arn'] = table['table_arn']
+ table['name'] = table['table_name']
+ table['status'] = table['table_status']
+ table['id'] = table['table_id']
+ table['size'] = table['table_size_bytes']
+ table['tags'] = tags
+
+ if 'table_class_summary' in table:
+ table['table_class'] = table['table_class_summary']['table_class']
+
+ # billing_mode_summary doesn't always seem to be returned, but it is always
+ # set for PAY_PER_REQUEST tables and after updating the billing_mode
+ if 'billing_mode_summary' in table:
+ table['billing_mode'] = table['billing_mode_summary']['billing_mode']
+ else:
+ table['billing_mode'] = "PROVISIONED"
+
+ # convert indexes into something we can easily search against
+ attributes = table['attribute_definitions']
+ global_index_map = dict()
+ local_index_map = dict()
+ for index in table.get('global_secondary_indexes', []):
+ idx = _decode_index(index, attributes, type_prefix='global_')
+ global_index_map[idx['name']] = idx
+ for index in table.get('local_secondary_indexes', []):
+ idx = _decode_index(index, attributes)
+ local_index_map[idx['name']] = idx
+ table['_global_index_map'] = global_index_map
+ table['_local_index_map'] = local_index_map
+
+ return table
+
+
+def _generate_attribute_map():
+ """
+ Builds a map of Key Names to Type
+ """
+ attributes = dict()
+
+ for index in (module.params, *module.params.get('indexes')):
+ # run through hash_key_name and range_key_name
+ for t in ['hash', 'range']:
+ key_name = index.get(t + '_key_name')
+ if not key_name:
+ continue
+ key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT
+ _type = _long_type_to_short(key_type)
+ if key_name in attributes:
+ if _type != attributes[key_name]:
+ module.fail_json(msg='Conflicting attribute type',
+ type_1=_type, type_2=attributes[key_name],
+ key_name=key_name)
+ else:
+ attributes[key_name] = _type
+
+ return attributes
+
+
+def _generate_attributes():
+ attributes = _generate_attribute_map()
+
+ # Use ansible_dict_to_boto3_tag_list to generate the list of dicts
+ # format we need
+ attrs = ansible_dict_to_boto3_tag_list(
+ attributes,
+ tag_name_key_name='AttributeName',
+ tag_value_key_name='AttributeType'
+ )
+ return list(attrs)
+
+
+def _generate_throughput(params=None):
+ if not params:
+ params = module.params
+
+ read_capacity = params.get('read_capacity') or 1
+ write_capacity = params.get('write_capacity') or 1
+ throughput = dict(
+ ReadCapacityUnits=read_capacity,
+ WriteCapacityUnits=write_capacity,
+ )
+
+ return throughput
+
+
+def _generate_schema(params=None):
+ if not params:
+ params = module.params
+
+ schema = list()
+ hash_key_name = params.get('hash_key_name')
+ range_key_name = params.get('range_key_name')
+
+ if hash_key_name:
+ entry = _schema_dict(hash_key_name, 'HASH')
+ schema.append(entry)
+ if range_key_name:
+ entry = _schema_dict(range_key_name, 'RANGE')
+ schema.append(entry)
+
+ return schema
+
+
+def _primary_index_changes(current_table):
+
+ primary_index = _decode_primary_index(current_table)
+
+ hash_key_name = primary_index.get('hash_key_name')
+ _hash_key_name = module.params.get('hash_key_name')
+ hash_key_type = primary_index.get('hash_key_type')
+ _hash_key_type = module.params.get('hash_key_type')
+ range_key_name = primary_index.get('range_key_name')
+ _range_key_name = module.params.get('range_key_name')
+ range_key_type = primary_index.get('range_key_type')
+ _range_key_type = module.params.get('range_key_type')
+
+ changed = list()
+
+ if _hash_key_name and (_hash_key_name != hash_key_name):
+ changed.append('hash_key_name')
+ if _hash_key_type and (_hash_key_type != hash_key_type):
+ changed.append('hash_key_type')
+ if _range_key_name and (_range_key_name != range_key_name):
+ changed.append('range_key_name')
+ if _range_key_type and (_range_key_type != range_key_type):
+ changed.append('range_key_type')
+
+ return changed
+
+
+def _throughput_changes(current_table, params=None):
+
+ if not params:
+ params = module.params
+
+ throughput = current_table.get('provisioned_throughput', {})
+ read_capacity = throughput.get('read_capacity_units', None)
+ _read_capacity = params.get('read_capacity') or read_capacity
+ write_capacity = throughput.get('write_capacity_units', None)
+ _write_capacity = params.get('write_capacity') or write_capacity
+
+ if (read_capacity != _read_capacity) or (write_capacity != _write_capacity):
+ return dict(
+ ReadCapacityUnits=_read_capacity,
+ WriteCapacityUnits=_write_capacity,
+ )
+
+ return dict()
+
+
+def _generate_global_indexes(billing_mode):
+ index_exists = dict()
+ indexes = list()
+
+ include_throughput = True
+
+ if billing_mode == "PAY_PER_REQUEST":
+ include_throughput = False
+
+ for index in module.params.get('indexes'):
+ if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']:
+ continue
+ name = index.get('name')
+ if name in index_exists:
+ module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name))
+ # Convert the type name to upper case and remove the global_
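+ # e.g. 'global_include' -> 'INCLUDE'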
+ index['type'] = index['type'].upper()[7:]
+ index = _generate_index(index, include_throughput)
+ index_exists[name] = True
+ indexes.append(index)
+
+ return indexes
+
+
+def _generate_local_indexes():
+ index_exists = dict()
+ indexes = list()
+
+ for index in module.params.get('indexes'):
+ index = dict(index)  # copy so we can safely modify the type below
+ if index.get('type') not in ['all', 'include', 'keys_only']:
+ continue
+ name = index.get('name')
+ if name in index_exists:
+ module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name))
+ index['type'] = index['type'].upper()
+ index = _generate_index(index, False)
+ index_exists[name] = True
+ indexes.append(index)
+
+ return indexes
+
+
+def _generate_global_index_map(current_table):
+ global_index_map = dict()
+ existing_indexes = current_table['_global_index_map']
+ for index in module.params.get('indexes'):
+ if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']:
+ continue
+ name = index.get('name')
+ if name in global_index_map:
+ module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name))
+ idx = _merge_index_params(index, existing_indexes.get(name, {}))
+ # Convert the type name to upper case and remove the global_
+ idx['type'] = idx['type'].upper()[7:]
+ global_index_map[name] = idx
+ return global_index_map
+
+
+def _generate_local_index_map(current_table):
+ local_index_map = dict()
+ existing_indexes = current_table['_local_index_map']
+ for index in module.params.get('indexes'):
+ if index.get('type') not in ['all', 'include', 'keys_only']:
+ continue
+ name = index.get('name')
+ if name in local_index_map:
+ module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name))
+ idx = _merge_index_params(index, existing_indexes.get(name, {}))
+ # Convert the type name to upper case
+ idx['type'] = idx['type'].upper()
+ local_index_map[name] = idx
+ return local_index_map
+
+
+def _generate_index(index, include_throughput=True):
+ key_schema = _generate_schema(index)
+ throughput = _generate_throughput(index)
+ non_key_attributes = index['includes'] or []
+ projection = dict(
+ ProjectionType=index['type'],
+ )
+ if index['type'] != 'ALL':
+ if non_key_attributes:
+ projection['NonKeyAttributes'] = non_key_attributes
+ else:
+ if non_key_attributes:
+ module.fail_json(
+ "DynamoDB does not support specifying non-key-attributes ('includes') for "
+ "indexes of type 'all'. Index name: {0}".format(index['name']))
+
+ idx = dict(
+ IndexName=index['name'],
+ KeySchema=key_schema,
+ Projection=projection,
+ )
+
+ if include_throughput:
+ idx['ProvisionedThroughput'] = throughput
+
+ return idx
+
+
+def _attribute_changes(current_table):
+ # TODO (future) It would be nice to catch attempts to change types here.
+ return _generate_attributes()
+
+
+def _global_index_changes(current_table):
+ current_global_index_map = current_table['_global_index_map']
+ global_index_map = _generate_global_index_map(current_table)
+
+ current_billing_mode = current_table.get('billing_mode')
+
+ if module.params.get('billing_mode') is None:
+ billing_mode = current_billing_mode
+ else:
+ billing_mode = module.params.get('billing_mode')
+
+ include_throughput = True
+
+ if billing_mode == "PAY_PER_REQUEST":
+ include_throughput = False
+
+ index_changes = list()
+
+ # TODO (future) it would be nice to add support for deleting an index
+ for name in global_index_map:
+
+ idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput))
+ if name not in current_global_index_map:
+ index_changes.append(dict(Create=idx))
+ else:
+ # The only thing we can change is the provisioned throughput.
+ # TODO (future) it would be nice to throw a deprecation here
+ # rather than dropping other changes on the floor
+ _current = current_global_index_map[name]
+ _new = global_index_map[name]
+
+ if include_throughput:
+ change = dict(_throughput_changes(_current, _new))
+ if change:
+ update = dict(
+ IndexName=name,
+ ProvisionedThroughput=change,
+ )
+ index_changes.append(dict(Update=update))
+
+ return index_changes
+
+
+def _local_index_changes(current_table):
+ # TODO (future) Changes to Local Indexes aren't possible after creation,
+ # we should probably throw a deprecation warning here (original module
+ # also just dropped these changes on the floor)
+ return []
+
+
+def _update_table(current_table):
+ changes = dict()
+ additional_global_index_changes = list()
+
+ # Get throughput / billing_mode changes
+ throughput_changes = _throughput_changes(current_table)
+ if throughput_changes:
+ changes['ProvisionedThroughput'] = throughput_changes
+
+ current_billing_mode = current_table.get('billing_mode')
+ new_billing_mode = module.params.get('billing_mode')
+
+ if new_billing_mode is None:
+ new_billing_mode = current_billing_mode
+
+ if current_billing_mode != new_billing_mode:
+ changes['BillingMode'] = new_billing_mode
+
+ # Update table_class, keeping the existing value if none is defined
+ if module.params.get('table_class'):
+ if module.params.get('table_class') != current_table.get('table_class'):
+ changes['TableClass'] = module.params.get('table_class')
+
+ global_index_changes = _global_index_changes(current_table)
+ if global_index_changes:
+ changes['GlobalSecondaryIndexUpdates'] = global_index_changes
+ # Only one index can be changed at a time unless the billing mode is
+ # also changing; pass the first change in the main update call and
+ # apply the remainder one by one on a slow retry, waiting for each
+ # to complete
+
+ if current_billing_mode == new_billing_mode:
+ if len(global_index_changes) > 1:
+ changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]]
+ additional_global_index_changes = global_index_changes[1:]
+
+ local_index_changes = _local_index_changes(current_table)
+ if local_index_changes:
+ changes['LocalSecondaryIndexUpdates'] = local_index_changes
+
+ if not changes:
+ return False
+
+ if module.check_mode:
+ return True
+
+ if global_index_changes or local_index_changes:
+ changes['AttributeDefinitions'] = _generate_attributes()
+
+ try:
+ client.update_table(
+ aws_retry=True,
+ TableName=module.params.get('name'),
+ **changes
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update table")
+
+ if additional_global_index_changes:
+ for index in additional_global_index_changes:
+ try:
+ _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update table", changes=changes,
+ additional_global_index_changes=additional_global_index_changes)
+
+ if module.params.get('wait'):
+ wait_exists()
+
+ return True
+
+
+def _update_tags(current_table):
+ _tags = module.params.get('tags')
+ if _tags is None:
+ return False
+
+ tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'),
+ purge_tags=module.params.get('purge_tags'))
+
+ # If neither need updating we can return already
+ if not (tags_to_add or tags_to_remove):
+ return False
+
+ if module.check_mode:
+ return True
+
+ if tags_to_add:
+ try:
+ client.tag_resource(
+ aws_retry=True,
+ ResourceArn=current_table['arn'],
+ Tags=ansible_dict_to_boto3_tag_list(tags_to_add),
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to tag table")
+ if tags_to_remove:
+ try:
+ client.untag_resource(
+ aws_retry=True,
+ ResourceArn=current_table['arn'],
+ TagKeys=tags_to_remove,
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to untag table")
+
+ return True
+
+
+def update_table(current_table):
+ primary_index_changes = _primary_index_changes(current_table)
+ if primary_index_changes:
+ module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format(primary_index_changes))
+
+ changed = False
+ changed |= _update_table(current_table)
+ changed |= _update_tags(current_table)
+
+ if module.params.get('wait'):
+ wait_exists()
+
+ return changed
+
+
+def create_table():
+ table_name = module.params.get('name')
+ table_class = module.params.get('table_class')
+ hash_key_name = module.params.get('hash_key_name')
+ billing_mode = module.params.get('billing_mode')
+
+ if billing_mode is None:
+ billing_mode = "PROVISIONED"
+
+ tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {})
+
+ if not hash_key_name:
+ module.fail_json('"hash_key_name" must be provided when creating a new table.')
+
+ if module.check_mode:
+ return True
+
+ if billing_mode == "PROVISIONED":
+ throughput = _generate_throughput()
+
+ attributes = _generate_attributes()
+ key_schema = _generate_schema()
+ local_indexes = _generate_local_indexes()
+ global_indexes = _generate_global_indexes(billing_mode)
+
+ params = dict(
+ TableName=table_name,
+ AttributeDefinitions=attributes,
+ KeySchema=key_schema,
+ Tags=tags,
+ BillingMode=billing_mode
+ # TODO (future)
+ # StreamSpecification,
+ # SSESpecification,
+ )
+
+ if table_class:
+ params['TableClass'] = table_class
+ if billing_mode == "PROVISIONED":
+ params['ProvisionedThroughput'] = throughput
+ if local_indexes:
+ params['LocalSecondaryIndexes'] = local_indexes
+ if global_indexes:
+ params['GlobalSecondaryIndexes'] = global_indexes
+
+ try:
+ client.create_table(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to create table')
+
+ if module.params.get('wait'):
+ wait_exists()
+
+ return True
+
+
+def delete_table(current_table):
+ if not current_table:
+ return False
+
+ if module.check_mode:
+ return True
+
+ table_name = module.params.get('name')
+
+ # If an index is mid-update then we have to wait for the update to complete
+ # before deletion will succeed
+ long_retry = AWSRetry.jittered_backoff(
+ retries=45, delay=5, max_delay=30,
+ catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'],
+ )
+
+ try:
+ long_retry(client.delete_table)(TableName=table_name)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to delete table')
+
+ if module.params.get('wait'):
+ wait_not_exists()
+
+ return True
+
+
+def main():
+
+ global module
+ global client
+
+ # TODO (future) It would be good to split global and local indexes. They have
+ # different parameters, use a separate namespace for names,
+ # and local indexes can't be updated.
+ index_options = dict(
+ name=dict(type='str', required=True),
+ # It would be nice to make this optional, but because Local and Global
+ # indexes are mixed in here we need this to be able to tell to which
+ # group of indexes the index belongs.
+ type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS),
+ hash_key_name=dict(type='str', required=False),
+ hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES),
+ range_key_name=dict(type='str', required=False),
+ range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES),
+ includes=dict(type='list', required=False, elements='str'),
+ read_capacity=dict(type='int', required=False),
+ write_capacity=dict(type='int', required=False),
+ )
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ hash_key_name=dict(type='str'),
+ hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES),
+ range_key_name=dict(type='str'),
+ range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES),
+ billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']),
+ read_capacity=dict(type='int'),
+ write_capacity=dict(type='int'),
+ indexes=dict(default=[], type='list', elements='dict', options=index_options),
+ table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'],
+ )
+ client = module.client('dynamodb', retry_decorator=retry_decorator)
+
+ if module.params.get('table_class'):
+ module.require_botocore_at_least('1.23.18', reason='to set table_class')
+
+ current_table = get_dynamodb_table()
+ changed = False
+ table = None
+ results = dict()
+
+ state = module.params.get('state')
+ if state == 'present':
+ if current_table:
+ changed |= update_table(current_table)
+ else:
+ changed |= create_table()
+ table = get_dynamodb_table()
+ elif state == 'absent':
+ changed |= delete_table(current_table)
+
+ compat_results = compatibility_results(table)
+ if compat_results:
+ results.update(compat_results)
+
+ results['changed'] = changed
+ if table:
+ # These keys were only used to pass computed data around internally and aren't needed by users
+ table.pop('_global_index_map', None)
+ table.pop('_local_index_map', None)
+ results['table'] = table
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
new file mode 100644
index 000000000..9cbbb3e5e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dynamodb_ttl
+version_added: 1.0.0
+short_description: Set TTL for a given DynamoDB table
+description:
+- Sets the TTL for a given DynamoDB table.
+options:
+ state:
+ description:
+ - State to set DynamoDB table to.
+ choices: ['enable', 'disable']
+ required: false
+ type: str
+ table_name:
+ description:
+ - Name of the DynamoDB table to work on.
+ required: true
+ type: str
+ attribute_name:
+ description:
+ - The name of the Time To Live attribute used to store the expiration time for items in the table.
+ - This appears to be required by the API even when disabling TTL.
+ required: true
+ type: str
+
+author: Ted Timmons (@tedder)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: enable TTL on my cowfacts table
+ community.aws.dynamodb_ttl:
+ state: enable
+ table_name: cowfacts
+ attribute_name: cow_deleted_date
+
+- name: disable TTL on my cowfacts table
+ community.aws.dynamodb_ttl:
+ state: disable
+ table_name: cowfacts
+ attribute_name: cow_deleted_date
+'''
+
+RETURN = '''
+current_status:
+ description: current or new TTL specification.
+ type: dict
+ returned: always
+ sample:
+ - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
+ - { "AttributeName": "deploy_timestamp", "Enabled": true }
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_current_ttl_state(c, table_name):
+ '''Fetch the state dict for a table.'''
+ current_state = c.describe_time_to_live(TableName=table_name)
+ return current_state.get('TimeToLiveDescription')
+
+
+def does_state_need_changing(attribute_name, desired_state, current_spec):
+ '''Run checks to see if the TTL setting needs to be modified. Basically a dirty check.'''
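+ # e.g. (illustrative values) a current_spec of
+ # {'AttributeName': 'expires', 'TimeToLiveStatus': 'ENABLED'} needs no
+ # change for desired_state 'enable' on 'expires', but does need one for
+ # 'disable' or for a different attribute name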
+ if not current_spec:
+ # we don't have an entry (or a table?)
+ return True
+
+ if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']:
+ return True
+ if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']:
+ return True
+ if attribute_name != current_spec.get('AttributeName'):
+ return True
+
+ return False
+
+
+def set_ttl_state(c, table_name, state, attribute_name):
+ '''Set our specification. Returns the update_time_to_live specification dict,
+ which differs from the describe_* response format.'''
+ is_enabled = False
+ if state.lower() == 'enable':
+ is_enabled = True
+
+ ret = c.update_time_to_live(
+ TableName=table_name,
+ TimeToLiveSpecification={
+ 'Enabled': is_enabled,
+ 'AttributeName': attribute_name
+ }
+ )
+
+ return ret.get('TimeToLiveSpecification')
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['enable', 'disable']),
+ table_name=dict(required=True),
+ attribute_name=dict(required=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ try:
+ dbclient = module.client('dynamodb')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ result = {'changed': False}
+ state = module.params['state']
+
+ # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
+ # methods so it's easier to do here.
+ try:
+ current_state = get_current_ttl_state(dbclient, module.params['table_name'])
+
+ if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state):
+ # changes needed
+ new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name'])
+ result['current_status'] = new_state
+ result['changed'] = True
+ else:
+ # no changes needed
+ result['current_status'] = current_state
+
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Failed to get or update ttl state")
+ except botocore.exceptions.ParamValidationError as e:
+ module.fail_json_aws(e, msg="Failed due to invalid parameters")
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Failed")
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
new file mode 100644
index 000000000..15a69163d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_copy
+version_added: 1.0.0
+short_description: Copy an AMI between AWS regions and return the new image ID
+description:
+ - Copies an AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
+options:
+ source_region:
+ description:
+ - The source region the AMI should be copied from.
+ required: true
+ type: str
+ source_image_id:
+ description:
+ - The ID of the AMI in source region that should be copied.
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the new AMI to copy. (As of 2.3 the default is C(default); in prior versions it was C(null).)
+ default: "default"
+ type: str
+ description:
+ description:
+ - An optional human-readable string describing the contents and purpose of the new AMI.
+ type: str
+ default: ''
+ encrypted:
+ description:
+ - Whether or not the destination snapshots of the copied AMI should be encrypted.
+ type: bool
+ default: false
+ kms_key_id:
+ description:
+ - KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
+ type: str
+ wait:
+ description:
+ - Wait for the copied AMI to be in state C(available) before returning.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ - Prior to 2.3 the default was C(1200).
+ - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
+ - This was re-enabled in 2.6 to allow timeouts greater than 10 minutes.
+ default: 600
+ type: int
+ tags:
+ description:
+ - 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})'
+ type: dict
+ aliases: ['resource_tags']
+ tag_equality:
+ description:
+ - Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
+ in an existing AMI, the AMI will not be copied again.
+ default: false
+ type: bool
+author:
+ - Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
+ - Tim C (@defunctio) <defunct@defunct.io>
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Basic AMI Copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+
+- name: AMI copy wait until available
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ wait: true
+ wait_timeout: 1200 # Default timeout is 600
+ register: image_id
+
+- name: Named AMI copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ name: My-Awesome-AMI
+ description: latest patch
+
+- name: Tagged AMI copy (will not copy the same AMI twice)
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ tags:
+ Name: My-Super-AMI
+ Patch: 1.2.3
+ tag_equality: true
+
+- name: Encrypted AMI copy
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: true
+
+- name: Encrypted AMI copy with specified key
+ community.aws.ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: true
+ kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
+'''
+
+RETURN = '''
+image_id:
+ description: AMI ID of the copied AMI
+ returned: always
+ type: str
+ sample: ami-e689729e
+'''
+
+try:
+ from botocore.exceptions import ClientError, WaiterError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+
+
+def copy_image(module, ec2):
+ """
+ Copies an AMI
+
+ module : AnsibleAWSModule object
+ ec2: ec2 connection object
+ """
+
+ image = None
+ changed = False
+ tags = module.params.get('tags')
+
+ params = {'SourceRegion': module.params.get('source_region'),
+ 'SourceImageId': module.params.get('source_image_id'),
+ 'Name': module.params.get('name'),
+ 'Description': module.params.get('description'),
+ 'Encrypted': module.params.get('encrypted'),
+ }
+ if module.params.get('kms_key_id'):
+ params['KmsKeyId'] = module.params.get('kms_key_id')
+
+ try:
+ if module.params.get('tag_equality'):
+ filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
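+ # e.g. tags {'Name': 'My-Super-AMI'} become the boto3 filter
+ # [{'Name': 'tag:Name', 'Values': ['My-Super-AMI']}] (illustrative values)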
+ filters.append(dict(Name='state', Values=['available', 'pending']))
+ images = ec2.describe_images(Filters=filters)
+ if len(images['Images']) > 0:
+ image = images['Images'][0]
+ if not image:
+ image = ec2.copy_image(**params)
+ image_id = image['ImageId']
+ if tags:
+ ec2.create_tags(Resources=[image_id],
+ Tags=ansible_dict_to_boto3_tag_list(tags))
+ changed = True
+
+ if module.params.get('wait'):
+ delay = 15
+ max_attempts = module.params.get('wait_timeout') // delay
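+ # e.g. the default wait_timeout of 600 polls every 15 seconds for up
+ # to 40 attempts (illustrative arithmetic)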
+ image_id = image.get('ImageId')
+ ec2.get_waiter('image_available').wait(
+ ImageIds=[image_id],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not copy AMI")
+ except Exception as e:
+ module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+
+
+def main():
+ argument_spec = dict(
+ source_region=dict(required=True),
+ source_image_id=dict(required=True),
+ name=dict(default='default'),
+ description=dict(default=''),
+ encrypted=dict(type='bool', default=False, required=False),
+ kms_key_id=dict(type='str', required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ tag_equality=dict(type='bool', default=False))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ ec2 = module.client('ec2')
+ copy_image(module, ec2)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
new file mode 100644
index 000000000..3b176b5ee
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_customer_gateway
+version_added: 1.0.0
+short_description: Manage an AWS customer gateway
+description:
+ - Manage an AWS customer gateway.
+author: Michael Baydoun (@MichaelBaydoun)
+notes:
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than once, the
+ first request creates the customer gateway and subsequent requests return information about the existing customer gateway
+ without creating any new customer gateway resources.
+ - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+ customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+options:
+ bgp_asn:
+ description:
+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN).
+ - Defaults to C(65000) if not specified when I(state=present).
+ type: int
+ ip_address:
+ description:
+ - Internet-routable IP address of the customer gateway; must be a static address.
+ required: true
+ type: str
+ name:
+ description:
+ - Name of the customer gateway.
+ required: true
+ type: str
+ routing:
+ description:
+ - The type of routing.
+ choices: ['static', 'dynamic']
+ default: dynamic
+ type: str
+ state:
+ description:
+ - Create or terminate the Customer Gateway.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Create Customer Gateway
+ community.aws.ec2_customer_gateway:
+ bgp_asn: 12345
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ region: us-east-1
+ register: cgw
+
+- name: Delete Customer Gateway
+ community.aws.ec2_customer_gateway:
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ state: absent
+ region: us-east-1
+ register: cgw
+'''
+
+RETURN = '''
+gateway.customer_gateways:
+ description: details about the gateway that was created.
+ returned: success
+ type: complex
+ contains:
+ bgp_asn:
+ description: The Border Gateway Protocol (BGP) Autonomous System Number.
+ returned: when exists and gateway is available.
+ sample: 65123
+ type: str
+ customer_gateway_id:
+ description: Gateway ID assigned by Amazon.
+ returned: when exists and gateway is available.
+ sample: cgw-cb6386a2
+ type: str
+ ip_address:
+ description: IP address of your gateway device.
+ returned: when exists and gateway is available.
+ sample: 1.2.3.4
+ type: str
+ state:
+ description: state of gateway.
+ returned: when gateway exists and is available.
+ sample: available
+ type: str
+ tags:
+ description: Any tags on the gateway.
+ returned: when gateway exists and is available, and when tags exist.
+ type: list
+ type:
+ description: encryption type.
+ returned: when gateway exists and is available.
+ sample: ipsec.1
+ type: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class Ec2CustomerGatewayManager:
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.ec2 = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])
+ def ensure_cgw_absent(self, gw_id):
+ response = self.ec2.delete_customer_gateway(
+ DryRun=False,
+ CustomerGatewayId=gw_id
+ )
+ return response
+
+ def ensure_cgw_present(self, bgp_asn, ip_address):
+ if not bgp_asn:
+ bgp_asn = 65000
+ response = self.ec2.create_customer_gateway(
+ DryRun=False,
+ Type='ipsec.1',
+ PublicIp=ip_address,
+ BgpAsn=bgp_asn,
+ )
+ return response
+
+ def tag_cgw_name(self, gw_id, name):
+ response = self.ec2.create_tags(
+ DryRun=False,
+ Resources=[
+ gw_id,
+ ],
+ Tags=[
+ {
+ 'Key': 'Name',
+ 'Value': name
+ },
+ ]
+ )
+ return response
+
+ def describe_gateways(self, ip_address):
+ response = self.ec2.describe_customer_gateways(
+ DryRun=False,
+ Filters=[
+ {
+ 'Name': 'state',
+ 'Values': [
+ 'available',
+ ]
+ },
+ {
+ 'Name': 'ip-address',
+ 'Values': [
+ ip_address,
+ ]
+ }
+ ]
+ )
+ return response
+
+
+def main():
+ argument_spec = dict(
+ bgp_asn=dict(required=False, type='int'),
+ ip_address=dict(required=True),
+ name=dict(required=True),
+ routing=dict(default='dynamic', choices=['dynamic', 'static']),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('routing', 'dynamic', ['bgp_asn'])
+ ]
+ )
+
+ gw_mgr = Ec2CustomerGatewayManager(module)
+
+ name = module.params.get('name')
+
+ existing = gw_mgr.describe_gateways(module.params['ip_address'])
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ if existing['CustomerGateways']:
+ existing['CustomerGateway'] = existing['CustomerGateways'][0]
+ results['gateway'] = existing
+ if existing['CustomerGateway']['Tags']:
+ tag_array = existing['CustomerGateway']['Tags']
+ for tag in tag_array:
+ if tag['Key'] == 'Name':
+ current_name = tag['Value']
+ if current_name != name:
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway']['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+ else:
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_present(
+ module.params['bgp_asn'],
+ module.params['ip_address'],
+ )
+ results['name'] = gw_mgr.tag_cgw_name(
+ results['gateway']['CustomerGateway']['CustomerGatewayId'],
+ module.params['name'],
+ )
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if existing['CustomerGateways']:
+ existing['CustomerGateway'] = existing['CustomerGateways'][0]
+ results['gateway'] = existing
+ if not module.check_mode:
+ results['gateway'] = gw_mgr.ensure_cgw_absent(
+ existing['CustomerGateway']['CustomerGatewayId']
+ )
+ results['changed'] = True
+
+ pretty_results = camel_dict_to_snake_dict(results)
+ module.exit_json(**pretty_results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
new file mode 100644
index 000000000..429ba2083
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_customer_gateway_info
+version_added: 1.0.0
+short_description: Gather information about customer gateways in AWS
+description:
+ - Gather information about customer gateways in AWS.
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
+ type: dict
+ default: {}
+ customer_gateway_ids:
+ description:
+ - Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list.
+ type: list
+ elements: str
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all customer gateways
+ community.aws.ec2_customer_gateway_info:
+
+- name: Gather information about a filtered list of customer gateways, based on tags
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ filters:
+ "tag:Name": test-customer-gateway
+ "tag:AltName": test-customer-gateway-alt
+ register: cust_gw_info
+
+- name: Gather information about a specific customer gateway by specifying customer gateway ID
+ community.aws.ec2_customer_gateway_info:
+ region: ap-southeast-2
+ customer_gateway_ids:
+ - 'cgw-48841a09'
+ - 'cgw-fec021ce'
+ register: cust_gw_info
+'''
+
+RETURN = r'''
+customer_gateways:
+ description: List of one or more customer gateways.
+ returned: always
+ type: list
+ sample: [
+ {
+ "bgp_asn": "65000",
+ "customer_gateway_id": "cgw-fec844ce",
+ "customer_gateway_name": "test-customer-gw",
+ "ip_address": "110.112.113.120",
+ "state": "available",
+ "tags": [
+ {
+ "key": "Name",
+ "value": "test-customer-gw"
+ }
+ ],
+ "type": "ipsec.1"
+ }
+ ]
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
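+# describe_customer_gateways returns datetime objects which json.dumps cannot
+# serialize natively; date_handler converts them to ISO-8601 strings so the
+# dumps/loads round-trip below yields plain, JSON-safe data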
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_customer_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
+ if snaked_customer_gateways:
+ for customer_gateway in snaked_customer_gateways:
+ customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
+ customer_gateway_name = customer_gateway['tags'].get('Name')
+ if customer_gateway_name:
+ customer_gateway['customer_gateway_name'] = customer_gateway_name
+ module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
+
+
+def main():
+
+ argument_spec = dict(
+ customer_gateway_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['customer_gateway_ids', 'filters']],
+ supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ list_customer_gateways(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
new file mode 100644
index 000000000..67fb0f43b
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
@@ -0,0 +1,827 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_launch_template
+version_added: 1.0.0
+short_description: Manage EC2 launch templates
+description:
+- Create, modify, and delete EC2 Launch Templates, which can be used to
+ create individual instances or with Autoscaling Groups.
+- The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all
+ parameters on those tasks, be passed a Launch Template which contains
+ settings like instance size, disk type, subnet, and more.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+author:
+- Ryan Scott Brown (@ryansb)
+options:
+ template_id:
+ description:
+ - The ID for the launch template, can be used for all cases except creating a new Launch Template.
+ aliases: [id]
+ type: str
+ template_name:
+ description:
+ - The template name. This must be unique in the region-account combination you are using.
+ - If no launch template exists with the specified name, a new launch template is created.
+ - If a launch template with the specified name already exists and the configuration has not changed,
+ nothing happens.
+ - If a launch template with the specified name already exists and the configuration has changed,
+ a new version of the launch template is created.
+ aliases: [name]
+ type: str
+ default_version:
+ description:
+ - The version that should be the default when users spin up new instances based on this template. By default, the latest version becomes the default.
+ type: str
+ default: latest
+ version_description:
+ version_added: 5.5.0
+ description:
+ - The description of a launch template version.
+ default: ""
+ type: str
+ state:
+ description:
+ - Whether the launch template should exist or not.
+ - Deleting specific versions of a launch template is not supported at this time.
+ choices: [present, absent]
+ default: present
+ type: str
+ block_device_mappings:
+ description:
+ - The block device mapping. Supplying both a snapshot ID and an encryption
+ value as arguments for block-device mapping results in an error. This is
+ because only blank volumes can be encrypted on start, and these are not
+ created from a snapshot. If a snapshot is the basis for the volume, it
+ contains data by definition and its encryption status cannot be changed
+ using this action.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ description: The device name (for example, /dev/sdh or xvdh).
+ type: str
+ no_device:
+ description: Suppresses the specified device included in the block device mapping of the AMI.
+ type: str
+ virtual_name:
+ description: >
+ The virtual device name (ephemeralN). Instance store volumes are
+ numbered starting from 0. An instance type with 2 available instance
+ store volumes can specify mappings for ephemeral0 and ephemeral1. The
+ number of available instance store volumes depends on the instance
+ type. After you connect to the instance, you must mount the volume.
+ type: str
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ type: dict
+ suboptions:
+ delete_on_termination:
+ description: Indicates whether the EBS volume is deleted on instance termination.
+ type: bool
+ encrypted:
+ description: >
+ Indicates whether the EBS volume is encrypted. Encrypted volumes
+ can only be attached to instances that support Amazon EBS
+ encryption. If you are creating a volume from a snapshot, you
+ can't specify an encryption value.
+ type: bool
+ iops:
+ description:
+ - The number of I/O operations per second (IOPS) that the volume
+ supports. For io1, this represents the number of IOPS that are
+ provisioned for the volume. For gp2, this represents the baseline
+ performance of the volume and the rate at which the volume
+ accumulates I/O credits for bursting. For more information about
+ General Purpose SSD baseline performance, I/O credits, and
+ bursting, see Amazon EBS Volume Types in the Amazon Elastic
+ Compute Cloud User Guide.
+ - >
+ Condition: This parameter is required for requests to create io1
+ volumes; it is not used in requests to create gp2, st1, sc1, or
+ standard volumes.
+ type: int
+ kms_key_id:
+ description: The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.
+ type: str
+ snapshot_id:
+ description: The ID of the snapshot to create the volume from.
+ type: str
+ volume_size:
+ description:
+ - The size of the volume, in GiB.
+ - "Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size."
+ type: int
+ volume_type:
+          description: The volume type.
+ type: str
+ cpu_options:
+ description:
+ - Choose CPU settings for the EC2 instances that will be created with this template.
+ - For more information, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
+ type: dict
+ suboptions:
+ core_count:
+ description: The number of CPU cores for the instance.
+ type: int
+ threads_per_core:
+ description: >
+ The number of threads per CPU core. To disable Intel Hyper-Threading
+ Technology for the instance, specify a value of 1. Otherwise, specify
+ the default value of 2.
+ type: int
+ credit_specification:
+ description: The credit option for CPU usage of the instance. Valid for T2 or T3 instances only.
+ type: dict
+ suboptions:
+ cpu_credits:
+ description: >
+ The credit option for CPU usage of a T2 or T3 instance. Valid values
+ are C(standard) and C(unlimited).
+ type: str
+ disable_api_termination:
+ description: >
+ This helps protect instances from accidental termination. If set to true,
+ you can't terminate the instance using the Amazon EC2 console, CLI, or
+ API. To change this attribute to false after launch, use
+ I(ModifyInstanceAttribute).
+ type: bool
+ ebs_optimized:
+ description: >
+ Indicates whether the instance is optimized for Amazon EBS I/O. This
+ optimization provides dedicated throughput to Amazon EBS and an optimized
+ configuration stack to provide optimal Amazon EBS I/O performance. This
+ optimization isn't available with all instance types. Additional usage
+ charges apply when using an EBS-optimized instance.
+ type: bool
+ elastic_gpu_specifications:
+ type: list
+ elements: dict
+ description: Settings for Elastic GPU attachments. See U(https://aws.amazon.com/ec2/elastic-gpus/) for details.
+ suboptions:
+ type:
+        description: The type of Elastic GPU to attach.
+ type: str
+ iam_instance_profile:
+ description: >
+ The name or ARN of an IAM instance profile. Requires permissions to
+ describe existing instance roles to confirm ARN is properly formed.
+ type: str
+ image_id:
+ description: >
+ The AMI ID to use for new instances launched with this template. This
+ value is region-dependent since AMIs are not global resources.
+ type: str
+ instance_initiated_shutdown_behavior:
+ description: >
+ Indicates whether an instance stops or terminates when you initiate
+ shutdown from the instance using the operating system shutdown command.
+ choices: [stop, terminate]
+ type: str
+ instance_market_options:
+ description: Options for alternative instance markets, currently only the spot market is supported.
+ type: dict
+ suboptions:
+ market_type:
+        description: The market type. This should always be C(spot).
+ type: str
+ spot_options:
+ description: Spot-market specific settings.
+ type: dict
+ suboptions:
+ block_duration_minutes:
+ description: >
+ The required duration for the Spot Instances (also known as Spot
+ blocks), in minutes. This value must be a multiple of 60 (60,
+ 120, 180, 240, 300, or 360).
+ type: int
+ instance_interruption_behavior:
+ description: The behavior when a Spot Instance is interrupted. The default is C(terminate).
+ choices: [hibernate, stop, terminate]
+ type: str
+ max_price:
+ description: The highest hourly price you're willing to pay for this Spot Instance.
+ type: str
+ spot_instance_type:
+ description: The request type to send.
+ choices: [one-time, persistent]
+ type: str
+ instance_type:
+ description: >
+ The instance type, such as C(c5.2xlarge). For a full list of instance types, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ type: str
+ kernel_id:
+ description: >
+ The ID of the kernel. We recommend that you use PV-GRUB instead of
+ kernels and RAM disks. For more information, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
+ type: str
+ key_name:
+ description:
+ - The name of the key pair. You can create a key pair using M(amazon.aws.ec2_key).
+ - If you do not specify a key pair, you can't connect to the instance
+ unless you choose an AMI that is configured to allow users another way to
+ log in.
+ type: str
+ monitoring:
+ description: Settings for instance monitoring.
+ type: dict
+ suboptions:
+ enabled:
+ type: bool
+ description: Whether to turn on detailed monitoring for new instances. This will incur extra charges.
+ network_interfaces:
+ description: One or more network interfaces.
+ type: list
+ elements: dict
+ suboptions:
+ associate_public_ip_address:
+ description: Associates a public IPv4 address with eth0 for a new network interface.
+ type: bool
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ type: bool
+ description:
+ description: A description for the network interface.
+ type: str
+ device_index:
+ description: The device index for the network interface attachment.
+ type: int
+ groups:
+ description: List of security group IDs to include on this instance.
+ type: list
+ elements: str
+ ipv6_address_count:
+ description: >
+ The number of IPv6 addresses to assign to a network interface. Amazon
+ EC2 automatically selects the IPv6 addresses from the subnet range.
+ You can't use this option if specifying the I(ipv6_addresses) option.
+ type: int
+ ipv6_addresses:
+ description: >
+ A list of one or more specific IPv6 addresses from the IPv6 CIDR
+ block range of your subnet. You can't use this option if you're
+ specifying the I(ipv6_address_count) option.
+ type: list
+ elements: str
+ network_interface_id:
+        description: The ENI ID of a network interface to attach.
+ type: str
+ private_ip_address:
+ description: The primary private IPv4 address of the network interface.
+ type: str
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ type: str
+ placement:
+ description: The placement group settings for the instance.
+ type: dict
+ suboptions:
+ affinity:
+ description: The affinity setting for an instance on a Dedicated Host.
+ type: str
+ availability_zone:
+ description: The Availability Zone for the instance.
+ type: str
+ group_name:
+ description: The name of the placement group for the instance.
+ type: str
+ host_id:
+ description: The ID of the Dedicated Host for the instance.
+ type: str
+ tenancy:
+ description: >
+ The tenancy of the instance (if the instance is running in a VPC). An
+ instance with a tenancy of dedicated runs on single-tenant hardware.
+ type: str
+ ram_disk_id:
+ description: >
+ The ID of the RAM disk to launch the instance with. We recommend that you
+ use PV-GRUB instead of kernels and RAM disks. For more information, see
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
+ type: str
+ security_group_ids:
+ description: A list of security group IDs (VPC or EC2-Classic) that the new instances will be added to.
+ type: list
+ elements: str
+ security_groups:
+ description: >
+ A list of security group names (Default VPC or EC2-Classic) that the new instances will be added to.
+ For any VPC other than Default, you must use I(security_group_ids).
+ type: list
+ elements: str
+ source_version:
+ description: >
+ The version number of the launch template version on which to base the new version.
+      The new version inherits the same launch parameters as the source version, except for parameters that you explicitly specify.
+ Snapshots applied to the block device mapping are ignored when creating a new version unless they are explicitly included.
+ type: str
+ default: latest
+ version_added: 4.1.0
+ tags:
+ type: dict
+ description:
+ - A set of key-value pairs to be applied to resources when this Launch Template is used.
+ - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)"
+ - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters."
+ aliases: ['resource_tags']
+ user_data:
+ description: >
+ The Base64-encoded user data to make available to the instance. For more information, see the Linux
+ U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Windows
+ U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
+ documentation on user-data.
+ type: str
+ metadata_options:
+ description:
+ - Configure EC2 Metadata options.
+ - For more information see the IMDS documentation
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
+ type: dict
+ version_added: 1.5.0
+ suboptions:
+ http_endpoint:
+ type: str
+ description: >
+ This parameter enables or disables the HTTP metadata endpoint on your instances.
+ choices: [enabled, disabled]
+ default: 'enabled'
+ http_put_response_hop_limit:
+ type: int
+ description: >
+ The desired HTTP PUT response hop limit for instance metadata requests.
+ The larger the number, the further instance metadata requests can travel.
+ default: 1
+ http_tokens:
+ type: str
+ description: >
+ The state of token usage for your instance metadata requests.
+ choices: [optional, required]
+ default: 'optional'
+ http_protocol_ipv6:
+ version_added: 3.1.0
+ type: str
+        description:
+          - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)).
+          - Requires botocore >= 1.21.29.
+ choices: [enabled, disabled]
+ default: 'disabled'
+ instance_metadata_tags:
+ version_added: 3.1.0
+ type: str
+ description:
+        - Whether the instance tags are available via the metadata endpoint (C(enabled)) or not (C(disabled)).
+        - Requires botocore >= 1.23.30.
+ choices: [enabled, disabled]
+ default: 'disabled'
+'''
+
+EXAMPLES = '''
+- name: Create an ec2 launch template
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ image_id: "ami-04b762b4289fba92b"
+ key_name: my_ssh_key
+ instance_type: t2.micro
+ iam_instance_profile: myTestProfile
+ disable_api_termination: true
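+
+# An illustrative sketch (AMI ID and template name are placeholders): require
+# IMDSv2 tokens using the metadata_options documented above.
+- name: Create a launch template that requires IMDSv2 tokens
+  community.aws.ec2_launch_template:
+    name: "imdsv2_template"
+    image_id: "ami-04b762b4289fba92b"
+    instance_type: t3.micro
+    metadata_options:
+      http_endpoint: enabled
+      http_tokens: required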
+
+- name: >
+ Create a new version of an existing ec2 launch template with a different instance type,
+ while leaving an older version as the default version
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ default_version: 1
+ instance_type: c5.4xlarge
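+
+# An illustrative sketch (version number and instance type are placeholders):
+# base a new version on an existing numbered version via source_version.
+- name: Create a new version based on version 2 of the template, changing only the instance type
+  community.aws.ec2_launch_template:
+    name: "my_template"
+    source_version: 2
+    instance_type: m5.large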
+
+- name: Delete an ec2 launch template
+ community.aws.ec2_launch_template:
+ name: "my_template"
+ state: absent
+
+# This module does not yet allow deletion of specific versions of launch templates
+'''
+
+RETURN = '''
+latest_version:
+ description: Latest available version of the launch template
+ returned: when state=present
+ type: int
+default_version:
+ description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always.
+ returned: when state=present
+ type: int
+'''
+import re
+from uuid import uuid4
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def determine_iam_role(module, name_or_arn):
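+    # An instance-profile ARN is used as-is; anything else is treated as a
+    # profile name and resolved to its ARN via the IAM API below.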
+ if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+ return {'arn': name_or_arn}
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return {'arn': role['InstanceProfile']['Arn']}
+ except is_boto3_error_code('NoSuchEntity') as e:
+ module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def existing_templates(module):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ matches = None
+ try:
+ if module.params.get('template_id'):
+ matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True)
+ elif module.params.get('template_name'):
+ matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True)
+ except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
+ # no named template was found, return nothing/empty versions
+ return None, []
+ except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-...`'.format(
+            module.params.get('template_id')))
+ except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except
+        module.fail_json_aws(
+            e, msg='Launch template with ID {0} could not be found, please supply a name '
+                   'instead so that a new template can be created'.format(module.params.get('template_id')))
+ except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
+ else:
+ template = matches['LaunchTemplates'][0]
+        template_id = template['LaunchTemplateId']
+ try:
+ return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions']
+ except (ClientError, BotoCoreError, WaiterError) as e:
+ module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
+
+
+def params_to_launch_data(module, template_params):
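+    # The module-level 'tags' dict is expanded into boto3 tag specifications so
+    # the same tags are applied to both instances and volumes at launch.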
+ if template_params.get('tags'):
+ tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags'))
+ template_params['tag_specifications'] = [
+ {
+ 'resource_type': r_type,
+ 'tags': tag_list
+ }
+ for r_type in ('instance', 'volume')
+ ]
+ del template_params['tags']
+ if module.params.get('iam_instance_profile'):
+ template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile'])
+ params = snake_dict_to_camel_dict(
+ dict((k, v) for k, v in template_params.items() if v is not None),
+ capitalize_first=True,
+ )
+ return params
+
+
+def delete_template(module):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ template, template_versions = existing_templates(module)
+ deleted_versions = []
+ if template or template_versions:
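+        # Delete all non-default versions first; the default version is removed
+        # together with the launch template itself below.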
+ non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
+ if non_default_versions:
+ try:
+ v_resp = ec2.delete_launch_template_versions(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ Versions=non_default_versions,
+ aws_retry=True,
+ )
+ if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
+ module.warn('Failed to delete template versions {0} on launch template {1}'.format(
+ v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
+ template['LaunchTemplateId'],
+ ))
+ deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
+ try:
+ resp = ec2.delete_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ aws_retry=True,
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
+ return {
+ 'deleted_versions': deleted_versions,
+ 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
+ 'changed': True,
+ }
+ else:
+ return {'changed': False}
+
+
+def create_or_update(module, template_options):
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound']))
+ template, template_versions = existing_templates(module)
+ out = {}
+ lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
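+    # Drop suboptions that were not supplied so they are not sent to the API as
+    # explicit None values.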
+ lt_data = scrub_none_parameters(lt_data, descend_into_lists=True)
+
+ if lt_data.get('MetadataOptions'):
+ if not module.botocore_at_least('1.23.30'):
+ # fail only if enabled is requested
+ if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled':
+ module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags')
+            # pop the value if it was not requested, to keep backwards compatibility;
+            # otherwise the module fails because the parameter is set by its default value
+ lt_data['MetadataOptions'].pop('InstanceMetadataTags')
+
+ if not module.botocore_at_least('1.21.29'):
+ # fail only if enabled is requested
+ if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled':
+ module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6')
+            # pop the value if it was not requested, to keep backwards compatibility;
+            # otherwise the module fails because the parameter is set by its default value
+ lt_data['MetadataOptions'].pop('HttpProtocolIpv6')
+
+ if not (template or template_versions):
+        # create an entirely new launch template
+ try:
+ resp = ec2.create_launch_template(
+ LaunchTemplateName=module.params['template_name'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create launch template")
+ template, template_versions = existing_templates(module)
+ out['changed'] = True
+ elif template and template_versions:
+ most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1]
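+        # Idempotency check: if the most recent version already matches the
+        # requested data and description, report no change.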
+ if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get(
+ "VersionDescription", ""
+ ):
+ out['changed'] = False
+ return out
+ try:
+ if module.params.get('source_version') in (None, ''):
+ resp = ec2.create_launch_template_version(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ VersionDescription=str(module.params["version_description"]),
+ aws_retry=True,
+ )
+ elif module.params.get('source_version') == 'latest':
+ resp = ec2.create_launch_template_version(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ SourceVersion=str(most_recent["VersionNumber"]),
+ VersionDescription=str(module.params["version_description"]),
+ aws_retry=True,
+ )
+ else:
+ try:
+ int(module.params.get('source_version'))
+ except ValueError:
+ module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version')))
+ # get source template version
+ source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None)
+ if source_version is None:
+ module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version')))
+ resp = ec2.create_launch_template_version(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateData=lt_data,
+ ClientToken=uuid4().hex,
+ SourceVersion=str(source_version["VersionNumber"]),
+ VersionDescription=str(module.params["version_description"]),
+ aws_retry=True,
+ )
+
+ if module.params.get('default_version') in (None, ''):
+ # no need to do anything, leave the existing version as default
+ pass
+ elif module.params.get('default_version') == 'latest':
+ set_default = ec2.modify_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ else:
+ try:
+ int(module.params.get('default_version'))
+ except ValueError:
+ module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version')))
+ set_default = ec2.modify_launch_template(
+ LaunchTemplateId=template['LaunchTemplateId'],
+ DefaultVersion=to_text(int(module.params.get('default_version'))),
+ ClientToken=uuid4().hex,
+ aws_retry=True,
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
+ template, template_versions = existing_templates(module)
+ out['changed'] = True
+ return out
+
+
+def format_module_output(module):
+ output = {}
+ template, template_versions = existing_templates(module)
+ template = camel_dict_to_snake_dict(template)
+ template_versions = [camel_dict_to_snake_dict(v) for v in template_versions]
+ for v in template_versions:
+ for ts in (v['launch_template_data'].get('tag_specifications') or []):
+ ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags'))
+ output.update(dict(template=template, versions=template_versions))
+ output['default_template'] = [
+ v for v in template_versions
+ if v.get('default_version')
+ ][0]
+ output['latest_template'] = [
+ v for v in template_versions
+ if (
+ v.get('version_number') and
+ int(v['version_number']) == int(template['latest_version_number'])
+ )
+ ][0]
+ if "version_number" in output['default_template']:
+ output['default_version'] = output['default_template']['version_number']
+ if "version_number" in output['latest_template']:
+ output['latest_version'] = output['latest_template']['version_number']
+ return output
+
+
+def main():
+ template_options = dict(
+ block_device_mappings=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ device_name=dict(),
+ ebs=dict(
+ type='dict',
+ options=dict(
+ delete_on_termination=dict(type='bool'),
+ encrypted=dict(type='bool'),
+ iops=dict(type='int'),
+ kms_key_id=dict(),
+ snapshot_id=dict(),
+ volume_size=dict(type='int'),
+ volume_type=dict(),
+ ),
+ ),
+ no_device=dict(),
+ virtual_name=dict(),
+ ),
+ ),
+ cpu_options=dict(
+ type='dict',
+ options=dict(
+ core_count=dict(type='int'),
+ threads_per_core=dict(type='int'),
+ ),
+ ),
+ credit_specification=dict(
+            type='dict',
+ options=dict(
+ cpu_credits=dict(),
+ ),
+ ),
+ disable_api_termination=dict(type='bool'),
+ ebs_optimized=dict(type='bool'),
+ elastic_gpu_specifications=dict(
+ options=dict(type=dict()),
+ type='list',
+ elements='dict',
+ ),
+ iam_instance_profile=dict(),
+ image_id=dict(),
+ instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']),
+ instance_market_options=dict(
+ type='dict',
+ options=dict(
+ market_type=dict(),
+ spot_options=dict(
+ type='dict',
+ options=dict(
+ block_duration_minutes=dict(type='int'),
+ instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']),
+ max_price=dict(),
+ spot_instance_type=dict(choices=['one-time', 'persistent']),
+ ),
+ ),
+ ),
+ ),
+ instance_type=dict(),
+ kernel_id=dict(),
+ key_name=dict(),
+ monitoring=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool')
+ ),
+ ),
+ metadata_options=dict(
+ type='dict',
+ options=dict(
+ http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'),
+ http_put_response_hop_limit=dict(type='int', default=1),
+ http_tokens=dict(choices=['optional', 'required'], default='optional'),
+ http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'),
+ instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'),
+ )
+ ),
+ network_interfaces=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ associate_public_ip_address=dict(type='bool'),
+ delete_on_termination=dict(type='bool'),
+ description=dict(),
+ device_index=dict(type='int'),
+ groups=dict(type='list', elements='str'),
+ ipv6_address_count=dict(type='int'),
+ ipv6_addresses=dict(type='list', elements='str'),
+ network_interface_id=dict(),
+ private_ip_address=dict(),
+ subnet_id=dict(),
+ ),
+ ),
+ placement=dict(
+ options=dict(
+ affinity=dict(),
+ availability_zone=dict(),
+ group_name=dict(),
+ host_id=dict(),
+ tenancy=dict(),
+ ),
+ type='dict',
+ ),
+ ram_disk_id=dict(),
+ security_group_ids=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ user_data=dict(),
+ )
+
+ arg_spec = dict(
+ state=dict(choices=["present", "absent"], default="present"),
+ template_name=dict(aliases=["name"]),
+ template_id=dict(aliases=["id"]),
+ default_version=dict(default="latest"),
+ source_version=dict(default="latest"),
+ version_description=dict(default=""),
+ )
+
+ arg_spec.update(template_options)
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ required_one_of=[
+ ('template_name', 'template_id')
+ ],
+ supports_check_mode=True
+ )
+
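+    # boto3 expects each IPv6 address as a mapping rather than a bare string,
+    # so convert the user-supplied list of strings here.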
+ for interface in (module.params.get('network_interfaces') or []):
+ if interface.get('ipv6_addresses'):
+ interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]
+
+ if module.params.get('state') == 'present':
+ out = create_or_update(module, template_options)
+ out.update(format_module_output(module))
+ elif module.params.get('state') == 'absent':
+ out = delete_template(module)
+ else:
+ module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))
+
+ module.exit_json(**out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
new file mode 100644
index 000000000..c27917df9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_placement_group
+version_added: 1.0.0
+short_description: Create or delete an EC2 Placement Group
+description:
+ - Create an EC2 Placement Group; if the placement group already exists,
+ nothing is done. Or, delete an existing placement group. If the placement
+ group is absent, do nothing. See also
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
+author: "Brad Macpherson (@iiibrad)"
+options:
+ name:
+ description:
+ - The name for the placement group.
+ required: true
+ type: str
+ partition_count:
+ description:
+ - The number of partitions.
+    - Valid only when I(strategy) is set to C(partition).
+ - Must be a value between C(1) and C(7).
+ type: int
+ version_added: 3.1.0
+ state:
+ description:
+ - Create or delete placement group.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ strategy:
+ description:
+    - Placement group strategy. Cluster will cluster instances into a
+      low-latency group in a single Availability Zone, while Spread spreads
+      instances across underlying hardware. Partition distributes instances
+      across logical partitions that do not share underlying hardware.
+ default: cluster
+ choices: [ 'cluster', 'spread', 'partition' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide
+# for details.
+
+- name: Create a placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+
+- name: Create a Spread placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+ strategy: spread
+
+- name: Create a Partition strategy placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+ strategy: partition
+ partition_count: 3
+
+- name: Delete a placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: absent
+
+'''
+
+
+RETURN = '''
+placement_group:
+ description: Placement group attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ name:
+ description: PG name
+ type: str
+ sample: my-cluster
+ state:
+ description: PG state
+ type: str
+ sample: "available"
+ strategy:
+ description: PG strategy
+ type: str
+ sample: "cluster"
+
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.exponential_backoff()
+def search_placement_group(connection, module):
+ """
+ Check if a placement group exists.
+ """
+ name = module.params.get("name")
+ try:
+ response = connection.describe_placement_groups(
+ Filters=[{
+ "Name": "group-name",
+ "Values": [name]
+ }])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't find placement group named [%s]" % name)
+
+ if len(response['PlacementGroups']) != 1:
+ return None
+ else:
+ placement_group = response['PlacementGroups'][0]
+ return {
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ }
+
+
+@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown'])
+def get_placement_group_information(connection, name):
+ """
+ Retrieve information about a placement group.
+ """
+ response = connection.describe_placement_groups(
+ GroupNames=[name]
+ )
+ placement_group = response['PlacementGroups'][0]
+ return {
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ }
+
+
+@AWSRetry.exponential_backoff()
+def create_placement_group(connection, module):
+ name = module.params.get("name")
+ strategy = module.params.get("strategy")
+ partition_count = module.params.get("partition_count")
+
+ if strategy != 'partition' and partition_count:
+ module.fail_json(
+ msg="'partition_count' can only be set when strategy is set to 'partition'.")
+
+ params = {}
+ params['GroupName'] = name
+ params['Strategy'] = strategy
+ if partition_count:
+ params['PartitionCount'] = partition_count
+ params['DryRun'] = module.check_mode
+
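+    # With DryRun=True (check mode) AWS validates the request without creating
+    # anything; a would-be success surfaces as the DryRunOperation error code.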
+ try:
+ connection.create_placement_group(**params)
+ except is_boto3_error_code('DryRunOperation'):
+ module.exit_json(changed=True, placement_group={
+ "name": name,
+ "state": 'DryRun',
+ "strategy": strategy,
+ })
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="Couldn't create placement group [%s]" % name)
+
+ module.exit_json(changed=True,
+ placement_group=get_placement_group_information(connection, name))
+
+
+@AWSRetry.exponential_backoff()
+def delete_placement_group(connection, module):
+ name = module.params.get("name")
+
+ try:
+ connection.delete_placement_group(
+ GroupName=name, DryRun=module.check_mode)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't delete placement group [%s]" % name)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ partition_count=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('ec2')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ placement_group = search_placement_group(connection, module)
+ if placement_group is None:
+ create_placement_group(connection, module)
+ else:
+ strategy = module.params.get("strategy")
+ if placement_group['strategy'] == strategy:
+ module.exit_json(
+ changed=False, placement_group=placement_group)
+ else:
+ name = module.params.get("name")
+ module.fail_json(
+ msg=("Placement group '{}' exists, can't change strategy" +
+ " from '{}' to '{}'").format(
+ name,
+ placement_group['strategy'],
+ strategy))
+
+ elif state == 'absent':
+ placement_group = search_placement_group(connection, module)
+ if placement_group is None:
+ module.exit_json(changed=False)
+ else:
+ delete_placement_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
new file mode 100644
index 000000000..d22f133ae
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_placement_group_info
+version_added: 1.0.0
+short_description: List EC2 Placement Group(s) details
+description:
+ - List details of EC2 Placement Group(s).
+author: "Brad Macpherson (@iiibrad)"
+options:
+ names:
+ description:
+ - A list of names to filter on. If a listed group does not exist, there
+ will be no corresponding entry in the result; no error will be raised.
+ type: list
+ elements: str
+ required: false
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all placement groups.
+ community.aws.ec2_placement_group_info:
+ register: all_ec2_placement_groups
+
+- name: List two placement groups.
+ community.aws.ec2_placement_group_info:
+ names:
+ - my-cluster
+ - my-other-cluster
+ register: specific_ec2_placement_groups
+
+- ansible.builtin.debug:
+ msg: >
+    {{ specific_ec2_placement_groups.placement_groups | json_query("[?name=='my-cluster']") }}
+
+'''
+
+
+RETURN = r'''
+placement_groups:
+ description: Placement group attributes
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: PG name
+ type: str
+ sample: my-cluster
+ state:
+ description: PG state
+ type: str
+ sample: "available"
+ strategy:
+ description: PG strategy
+ type: str
+ sample: "cluster"
+
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_placement_groups_details(connection, module):
+ names = module.params.get("names")
+ try:
+ if len(names) > 0:
+ response = connection.describe_placement_groups(
+ Filters=[{
+ "Name": "group-name",
+ "Values": names
+ }])
+ else:
+ response = connection.describe_placement_groups()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Couldn't find placement groups named [%s]" % names)
+
+ results = []
+ for placement_group in response['PlacementGroups']:
+ results.append({
+ "name": placement_group['GroupName'],
+ "state": placement_group['State'],
+ "strategy": placement_group['Strategy'],
+ })
+ return results
+
+
+def main():
+ argument_spec = dict(
+ names=dict(type='list', default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('ec2')
+
+ placement_groups = get_placement_groups_details(connection, module)
+ module.exit_json(changed=False, placement_groups=placement_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
new file mode 100644
index 000000000..f45be4417
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_copy
+version_added: 1.0.0
+short_description: Copies an EC2 snapshot and returns the new Snapshot ID
+description:
+ - Copies an EC2 Snapshot from a source region to a destination region.
+options:
+ source_region:
+ description:
+ - The source region the Snapshot should be copied from.
+ required: true
+ type: str
+ source_snapshot_id:
+ description:
+ - The ID of the Snapshot in source region that should be copied.
+ required: true
+ type: str
+ description:
+ description:
+      - An optional human-readable string describing the purpose of the new Snapshot.
+ type: str
+ default: ''
+ encrypted:
+ description:
+ - Whether or not the destination Snapshot should be encrypted.
+ type: bool
+ default: false
+ kms_key_id:
+ description:
+      - KMS key ID used to encrypt the snapshot. If not specified, AWS defaults to C(alias/aws/ebs).
+ type: str
+ wait:
+ description:
+      - Wait for the copied Snapshot to reach the C(completed) state before returning.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 600
+ type: int
+ tags:
+ description:
+ - A dictionary representing the tags to be applied to the newly created resource.
+ type: dict
+ aliases: ['resource_tags']
+author:
+ - Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com>
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Basic Snapshot Copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+
+- name: Copy Snapshot and wait until available
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ wait: true
+ wait_timeout: 1200 # Default timeout is 600
+ register: snapshot_id
+
+- name: Tagged Snapshot copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ tags:
+ Name: Snapshot-Name
+
+- name: Encrypted Snapshot copy
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ encrypted: true
+
+- name: Encrypted Snapshot copy with specified key
+ community.aws.ec2_snapshot_copy:
+ source_region: eu-central-1
+ region: eu-west-1
+ source_snapshot_id: snap-xxxxxxx
+ encrypted: true
+ kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
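+
+# An illustrative sketch (the description text is a placeholder): attach a
+# human-readable description to the copied snapshot.
+- name: Snapshot copy with a description
+  community.aws.ec2_snapshot_copy:
+    source_region: eu-central-1
+    region: eu-west-1
+    source_snapshot_id: snap-xxxxxxx
+    description: Weekly backup copy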
+'''
+
+RETURN = '''
+snapshot_id:
+ description: snapshot id of the newly created snapshot
+ returned: when snapshot copy is successful
+ type: str
+ sample: "snap-e9095e8c"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+def copy_snapshot(module, ec2):
+ """
+ Copies an EC2 Snapshot to another region
+
+ module : AnsibleAWSModule object
+ ec2: ec2 connection object
+ """
+
+ params = {
+ 'SourceRegion': module.params.get('source_region'),
+ 'SourceSnapshotId': module.params.get('source_snapshot_id'),
+ 'Description': module.params.get('description')
+ }
+
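+    # Optional arguments are added only when provided, so the API call does not
+    # receive explicit nulls for parameters the user left out.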
+ if module.params.get('encrypted'):
+ params['Encrypted'] = True
+
+ if module.params.get('kms_key_id'):
+ params['KmsKeyId'] = module.params.get('kms_key_id')
+
+ if module.params.get('tags'):
+ params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot'])
+
+ try:
+ snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
+ if module.params.get('wait'):
+ delay = 15
+            # Add one to max_attempts because wait() increments
+            # its counter before assessing it for time.sleep()
+ max_attempts = (module.params.get('wait_timeout') // delay) + 1
+ ec2.get_waiter('snapshot_completed').wait(
+ SnapshotIds=[snapshot_id],
+ WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
+ )
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.')
+
+ module.exit_json(changed=True, snapshot_id=snapshot_id)
+
+
+def main():
+ argument_spec = dict(
+ source_region=dict(required=True),
+ source_snapshot_id=dict(required=True),
+ description=dict(default=''),
+ encrypted=dict(type='bool', default=False, required=False),
+ kms_key_id=dict(type='str', required=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ copy_snapshot(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
new file mode 100644
index 000000000..298646cf8
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway
+short_description: Create and delete AWS Transit Gateways
+version_added: 1.0.0
+description:
+ - Creates AWS Transit Gateways.
+ - Deletes AWS Transit Gateways.
+ - Updates tags on existing transit gateways.
+options:
+ asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+ - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ type: int
+ auto_associate:
+ description:
+ - Enable or disable automatic association with the default association route table.
+ default: true
+ type: bool
+ auto_attach:
+ description:
+ - Enable or disable automatic acceptance of attachment requests.
+ default: false
+ type: bool
+ auto_propagate:
+ description:
+ - Enable or disable automatic propagation of routes to the default propagation route table.
+ default: true
+ type: bool
+ description:
+ description:
+ - The description of the transit gateway.
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: true
+ type: bool
+ state:
+ description:
+ - C(present) to ensure resource is created.
+ - C(absent) to remove resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ transit_gateway_id:
+ description:
+ - The ID of the transit gateway.
+ type: str
+ vpn_ecmp_support:
+ description:
+ - Enable or disable Equal Cost Multipath Protocol support.
+ default: true
+ type: bool
+ wait:
+ description:
+      - Whether to wait for the transit gateway to reach the requested state before returning.
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+      - Number of seconds to wait for the transit gateway to reach the requested state.
+ default: 300
+ type: int
+
+author:
+ - "Bob Boldin (@BobBoldin)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+- name: Create a new transit gateway using defaults
+ community.aws.ec2_transit_gateway:
+ state: present
+ region: us-east-1
+ description: personal-testing
+ register: created_tgw
+
+- name: Create a new transit gateway with options
+ community.aws.ec2_transit_gateway:
+ asn: 64514
+ auto_associate: false
+ auto_propagate: false
+    dns_support: true
+    description: "nonprod transit gateway"
+    purge_tags: false
+ state: present
+ region: us-east-1
+ tags:
+ Name: nonprod transit gateway
+ status: testing
+
+- name: Remove a transit gateway by description
+ community.aws.ec2_transit_gateway:
+ state: absent
+ region: us-east-1
+ description: personal-testing
+
+- name: Remove a transit gateway by id
+ community.aws.ec2_transit_gateway:
+ state: absent
+ region: ap-southeast-2
+ transit_gateway_id: tgw-3a9aa123
+ register: deleted_tgw
+'''
+
+RETURN = '''
+transit_gateway:
+ description: The attributes of the transit gateway.
+ type: complex
+  returned: when I(state=present)
+ contains:
+ creation_time:
+ description: The creation time of the transit gateway.
+ returned: always
+ type: str
+ sample: "2019-03-06T17:13:51+00:00"
+ description:
+ description: The description of the transit gateway.
+ returned: always
+ type: str
+ sample: my test tgw
+ options:
+ description: The options attributes of the transit gateway
+ returned: always
+ type: complex
+ contains:
+ amazon_side_asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+ The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ returned: always
+ type: str
+ sample: 64512
+      auto_accept_shared_attachments:
+ description: Indicates whether attachment requests are automatically accepted.
+ returned: always
+ type: str
+ sample: disable
+ default_route_table_association:
+ description:
+ - Indicates whether resource attachments are automatically
+ associated with the default association route table.
+ returned: always
+ type: str
+ sample: enable
+ association_default_route_table_id:
+ description: The ID of the default association route table.
+          returned: when exists
+ type: str
+ sample: tgw-rtb-abc123444
+ default_route_table_propagation:
+ description:
+ - Indicates whether resource attachments automatically
+ propagate routes to the default propagation route table.
+ returned: always
+ type: str
+ sample: disable
+ propagation_default_route_table_id:
+ description: The ID of the default propagation route table.
+ returned: when exists
+ type: str
+ sample: tgw-rtb-def456777
+ vpn_ecmp_support:
+ description: Indicates whether Equal Cost Multipath Protocol support is enabled.
+ returned: always
+ type: str
+ sample: enable
+ dns_support:
+ description: Indicates whether DNS support is enabled.
+ returned: always
+ type: str
+ sample: enable
+ owner_id:
+ description: The account that owns the transit gateway.
+ returned: always
+ type: str
+ sample: '123456789012'
+ state:
+ description: The state of the transit gateway.
+ returned: always
+ type: str
+ sample: pending
+ tags:
+ description: A dictionary of resource tags
+ returned: always
+ type: dict
+ sample:
+ tags:
+ Name: nonprod_tgw
+ transit_gateway_arn:
+      description: The ARN of the transit gateway.
+      returned: always
+      type: str
+      sample: "arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-3a9aa123"
+ transit_gateway_id:
+      description: The ID of the transit gateway.
+ returned: always
+ type: str
+ sample: tgw-3a9aa123
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from time import sleep, time
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+
+
+class AnsibleEc2Tgw(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['IncorrectState'],
+ )
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+ self._connection = connection
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ """ Process the request based on state parameter .
+ state = present will search for an existing tgw based and return the object data.
+ if no object is found it will be created
+
+ state = absent will attempt to remove the tgw however will fail if it still has
+ attachments or associations
+ """
+ description = self._module.params.get('description')
+ state = self._module.params.get('state', 'present')
+ tgw_id = self._module.params.get('transit_gateway_id')
+
+ if state == 'present':
+ self.ensure_tgw_present(tgw_id, description)
+ elif state == 'absent':
+ self.ensure_tgw_absent(tgw_id, description)
+
+ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True):
+ """
+ Wait for the Transit Gateway to reach the specified status
+        :param wait_timeout: Maximum number of seconds to wait before giving up.
+        :param tgw_id: The AWS ID of the transit gateway.
+        :param status: The status to wait for, e.g. status=available or status=deleted.
+ :param skip_deleted: ignore deleted transit gateways
+ :return dict: transit gateway object
+ """
+ polling_increment_secs = 5
+ wait_timeout = time() + wait_timeout
+ status_achieved = False
+ transit_gateway = dict()
+
+ while wait_timeout > time():
+ try:
+ transit_gateway = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=skip_deleted)
+
+ if transit_gateway:
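+                    # In check mode nothing actually changes state, so assume
+                    # the requested status has been reached to exit the loop.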
+ if self._check_mode:
+ transit_gateway['state'] = status
+
+ if transit_gateway.get('state') == status:
+ status_achieved = True
+ break
+
+ elif transit_gateway.get('state') == 'failed':
+ break
+
+ else:
+ sleep(polling_increment_secs)
+
+ except ClientError as e:
+ self._module.fail_json_aws(e)
+
+ if not status_achieved:
+ self._module.fail_json(
+ msg="Wait time out reached, while waiting for results")
+
+ return transit_gateway
+
+ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True):
+ """ search for an existing tgw by either tgw_id or description
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+ :param skip_deleted: ignore deleted transit gateways
+ :return dict: transit gateway object
+ """
+ filters = []
+ if tgw_id:
+ filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id})
+
+ try:
+ response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ tgw = None
+ tgws = []
+
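+        # An id lookup returns at most one gateway; a description lookup may
+        # match several, which is treated as an error further below.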
+ if len(response.get('TransitGateways', [])) == 1 and tgw_id:
+ if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted:
+ tgws.extend(response['TransitGateways'])
+
+ for gateway in response.get('TransitGateways', []):
+ if description == gateway['Description'] and gateway['State'] != 'deleted':
+ tgws.append(gateway)
+
+ if len(tgws) > 1:
+ self._module.fail_json(
+ msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description))
+ elif tgws:
+ tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags'])
+ tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags'])
+
+ return tgw
+
+ @staticmethod
+ def enable_option_flag(flag):
+ disabled = "disable"
+ enabled = "enable"
+ if flag:
+ return enabled
+ return disabled
+
+ def create_tgw(self, description):
+ """
+ Create a transit gateway and optionally wait for status to become available.
+
+ :param description: The description of the transit gateway.
+ :return dict: transit gateway object
+ """
+ options = dict()
+ wait = self._module.params.get('wait')
+ wait_timeout = self._module.params.get('wait_timeout')
+
+ if self._module.params.get('asn'):
+ options['AmazonSideAsn'] = self._module.params.get('asn')
+
+ options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach'))
+ options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate'))
+ options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate'))
+ options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support'))
+ options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support'))
+
+ try:
+ response = self._connection.create_transit_gateway(Description=description, Options=options)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ tgw_id = response['TransitGateway']['TransitGatewayId']
+
+ if wait:
+ result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available")
+ else:
+ result = self.get_matching_tgw(tgw_id=tgw_id)
+
+        self._results['msg'] = 'Transit gateway {0} created'.format(result['transit_gateway_id'])
+
+ return result
+
+ def delete_tgw(self, tgw_id):
+ """
+        Delete the transit gateway and optionally wait for its status to become deleted.
+
+ :param tgw_id: The id of the transit gateway
+ :return dict: transit gateway object
+ """
+ wait = self._module.params.get('wait')
+ wait_timeout = self._module.params.get('wait_timeout')
+
+ try:
+ response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id)
+ except (ClientError, BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ if wait:
+ result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False)
+ else:
+ result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False)
+
+        self._results['msg'] = 'Transit gateway {0} deleted'.format(tgw_id)
+
+ return result
+
+ def ensure_tgw_present(self, tgw_id=None, description=None):
+ """
+        Will create a tgw if no match to the tgw_id or description is found.
+        Will update the tgw tags if a matching one is found but its tags are not synced.
+
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+ :return dict: transit gateway object
+ """
+ tgw = self.get_matching_tgw(tgw_id, description)
+
+ if tgw is None:
+ if self._check_mode:
+ self._results['changed'] = True
+ self._results['transit_gateway_id'] = None
+ return self._results
+
+ try:
+ if not description:
+ self._module.fail_json(msg="Failed to create Transit Gateway: description argument required")
+ tgw = self.create_tgw(description)
+ self._results['changed'] = True
+ except (BotoCoreError, ClientError) as e:
+ self._module.fail_json_aws(e, msg='Unable to create Transit Gateway')
+
+ self._results['changed'] |= ensure_ec2_tags(
+ self._connection, self._module, tgw['transit_gateway_id'],
+ tags=self._module.params.get('tags'),
+ purge_tags=self._module.params.get('purge_tags'),
+ )
+
+ self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'])
+
+ return self._results
+
+ def ensure_tgw_absent(self, tgw_id=None, description=None):
+ """
+ Will delete the tgw if a single tgw is found not yet in deleted status
+
+ :param tgw_id: The AWS id of the transit gateway
+ :param description: The description of the transit gateway.
+        :return dict: transit gateway object
+ """
+ self._results['transit_gateway_id'] = None
+ tgw = self.get_matching_tgw(tgw_id, description)
+
+ if tgw is not None:
+ if self._check_mode:
+ self._results['changed'] = True
+ return self._results
+
+ try:
+ tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id'])
+ self._results['changed'] = True
+ self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'],
+ skip_deleted=False)
+ except (BotoCoreError, ClientError) as e:
+ self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway')
+
+ return self._results
+
+
+def setup_module_object():
+ """
+ merge argument spec and create Ansible module object
+ :return: Ansible module object
+ """
+
+ argument_spec = dict(
+ asn=dict(type='int'),
+ auto_associate=dict(type='bool', default=True),
+ auto_attach=dict(type='bool', default=False),
+ auto_propagate=dict(type='bool', default=True),
+ description=dict(type='str'),
+ dns_support=dict(type='bool', default=True),
+ purge_tags=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ transit_gateway_id=dict(type='str'),
+ vpn_ecmp_support=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[('description', 'transit_gateway_id')],
+ supports_check_mode=True,
+ )
+
+ return module
+
+
+def main():
+
+ module = setup_module_object()
+
+ results = dict(
+ changed=False
+ )
+
+ tgw_manager = AnsibleEc2Tgw(module=module, results=results)
+ tgw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
new file mode 100644
index 000000000..5ce3dc6a4
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_transit_gateway_info
+short_description: Gather information about EC2 transit gateways in AWS
+version_added: 1.0.0
+description:
+  - Gather information about EC2 transit gateways in AWS.
+author: "Bob Boldin (@BobBoldin)"
+options:
+ transit_gateway_ids:
+ description:
+ - A list of transit gateway IDs to gather information for.
+ aliases: [transit_gateway_id]
+ type: list
+ elements: str
+ default: []
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters.
+ type: dict
+ default: {}
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather info about all transit gateways
+ community.aws.ec2_transit_gateway_info:
+
+- name: Gather info about a particular transit gateway using filter transit gateway ID
+ community.aws.ec2_transit_gateway_info:
+ filters:
+ transit-gateway-id: tgw-02c42332e6b7da829
+
+- name: Gather info about a particular transit gateway using multiple option filters
+ community.aws.ec2_transit_gateway_info:
+ filters:
+ options.dns-support: enable
+ options.vpn-ecmp-support: enable
+
+- name: Gather info about multiple transit gateways using module param
+ community.aws.ec2_transit_gateway_info:
+ transit_gateway_ids:
+ - tgw-02c42332e6b7da829
+ - tgw-03c53443d5a8cb716
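+
+# Illustrative only: register the result and read one of the documented
+# return fields (assumes at least one transit gateway exists).
+- name: Show the state of the first matching transit gateway
+  community.aws.ec2_transit_gateway_info:
+  register: tgw_info
+
+- ansible.builtin.debug:
+    msg: "{{ tgw_info.transit_gateways[0].state }}"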
+'''
+
+RETURN = r'''
+transit_gateways:
+ description: >
+ Transit gateways that match the provided filters. Each element consists of a dict with all the information
+ related to that transit gateway.
+ returned: on success
+ type: complex
+ contains:
+ creation_time:
+ description: The creation time.
+ returned: always
+ type: str
+ sample: "2019-02-05T16:19:58+00:00"
+ description:
+ description: The description of the transit gateway.
+ returned: always
+ type: str
+ sample: "A transit gateway"
+ options:
+ description: A dictionary of the transit gateway options.
+ returned: always
+ type: complex
+ contains:
+ amazon_side_asn:
+ description:
+ - A private Autonomous System Number (ASN) for the Amazon
+ side of a BGP session. The range is 64512 to 65534 for
+ 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+ returned: always
+ type: int
+ sample: 64512
+ auto_accept_shared_attachments:
+ description:
+ - Indicates whether attachment requests are automatically accepted.
+ returned: always
+ type: str
+ sample: "enable"
+ default_route_table_association:
+ description:
+ - Indicates whether resource attachments are automatically
+ associated with the default association route table.
+ returned: always
+ type: str
+ sample: "disable"
+ association_default_route_table_id:
+ description:
+ - The ID of the default association route table.
+ returned: when present
+ type: str
+ sample: "rtb-11223344"
+ default_route_table_propagation:
+ description:
+ - Indicates whether resource attachments automatically
+ propagate routes to the default propagation route table.
+ returned: always
+ type: str
+ sample: "disable"
+ dns_support:
+ description:
+ - Indicates whether DNS support is enabled.
+ returned: always
+ type: str
+ sample: "enable"
+ propagation_default_route_table_id:
+ description:
+ - The ID of the default propagation route table.
+ returned: when present
+ type: str
+ sample: "rtb-11223344"
+ vpn_ecmp_support:
+ description:
+ - Indicates whether Equal Cost Multipath Protocol support
+ is enabled.
+ returned: always
+ type: str
+ sample: "enable"
+ owner_id:
+      description: The ID of the AWS account that owns the transit gateway.
+ returned: always
+ type: str
+ sample: "123456789012"
+ state:
+ description: The state of the transit gateway.
+ returned: always
+ type: str
+ sample: "available"
+ tags:
+ description: A dict of tags associated with the transit gateway.
+ returned: always
+ type: dict
+ sample: '{
+ "Name": "A sample TGW"
+ }'
+ transit_gateway_arn:
+ description: The Amazon Resource Name (ARN) of the transit gateway.
+ returned: always
+ type: str
+ sample: "arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-02c42332e6b7da829"
+ transit_gateway_id:
+ description: The ID of the transit gateway.
+ returned: always
+ type: str
+ sample: "tgw-02c42332e6b7da829"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class AnsibleEc2TgwInfo(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client('ec2')
+ self._check_mode = self._module.check_mode
+
+ @AWSRetry.exponential_backoff()
+ def describe_transit_gateways(self):
+ """
+        Describe transit gateways, applying the transit_gateway_ids and filters
+        module parameters, and store the snake_cased results in
+        self._results['transit_gateways'].
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(self._module.params['filters'])
+ transit_gateway_ids = self._module.params['transit_gateway_ids']
+
+ # init empty list for return vars
+ transit_gateway_info = list()
+
+ # Get the basic transit gateway info
+ try:
+ response = self._connection.describe_transit_gateways(
+ TransitGatewayIds=transit_gateway_ids, Filters=filters)
+ except is_boto3_error_code('InvalidTransitGatewayID.NotFound'):
+ self._results['transit_gateways'] = []
+ return
+
+ for transit_gateway in response['TransitGateways']:
+ transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags']))
+ # convert tag list to ansible dict
+ transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', []))
+
+ self._results['transit_gateways'] = transit_gateway_info
+ return
+
+
+def setup_module_object():
+ """
+ merge argument spec and create Ansible module object
+ :return: Ansible module object
+ """
+
+ argument_spec = dict(
+ transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ return module
+
+
+def main():
+
+ module = setup_module_object()
+
+ results = dict(
+ changed=False
+ )
+
+ tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results)
+ try:
+ tgwf_manager.describe_transit_gateways()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py
new file mode 100644
index 000000000..554059021
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway_vpc_attachment
+short_description: Create and delete AWS Transit Gateway VPC attachments
+version_added: 4.0.0
+description:
+  - Creates, deletes and updates AWS Transit Gateway VPC attachments.
+options:
+ transit_gateway:
+ description:
+ - The ID of the Transit Gateway that the attachment belongs to.
+ - When creating a new attachment, I(transit_gateway) must be provided.
+ - At least one of I(name), I(transit_gateway) and I(id) must be provided.
+      - I(transit_gateway) is an immutable setting and cannot be updated on an
+        existing attachment.
+ type: str
+ required: false
+ aliases: ['transit_gateway_id']
+ id:
+ description:
+ - The ID of the Transit Gateway Attachment.
+ - When I(id) is not set, a search using I(transit_gateway) and I(name) will be
+ performed. If multiple results are returned, the module will fail.
+ - At least one of I(name), I(transit_gateway) and I(id) must be provided.
+ type: str
+ required: false
+ aliases: ['attachment_id']
+ name:
+ description:
+ - The C(Name) tag of the Transit Gateway attachment.
+      - Providing both I(id) and I(name) will set the C(Name) tag on the existing
+        attachment matching I(id).
+ - Setting the C(Name) tag in I(tags) will also result in the C(Name) tag being
+ updated.
+ - At least one of I(name), I(transit_gateway) and I(id) must be provided.
+ type: str
+ required: false
+ state:
+ description:
+ - Create or remove the Transit Gateway attachment.
+ type: str
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ subnets:
+ description:
+      - The IDs of the subnets in which to create the transit gateway VPC attachment.
+ - Required when creating a new attachment.
+ type: list
+ elements: str
+ required: false
+ purge_subnets:
+ description:
+ - If I(purge_subnets=true), existing subnets will be removed from the
+ attachment as necessary to match exactly what is defined by I(subnets).
+ type: bool
+ required: false
+ default: true
+ dns_support:
+ description:
+ - Whether DNS support is enabled.
+ type: bool
+ required: false
+ ipv6_support:
+ description:
+ - Whether IPv6 support is enabled.
+ type: bool
+ required: false
+ appliance_mode_support:
+ description:
+ - Whether the attachment is configured for appliance mode.
+      - When appliance mode is enabled, the Transit Gateway uses the 4-tuple of
+        an IP packet to select a single Transit Gateway ENI in the appliance VPC
+        for the life of the flow.
+ type: bool
+ required: false
+ wait:
+ description:
+ - Whether to wait for the Transit Gateway attachment to reach the
+ C(Available) or C(Deleted) state before the module returns.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - Maximum time, in seconds, to wait for the Transit Gateway attachment
+ to reach the expected state.
+ - Defaults to 600 seconds.
+ type: int
+ required: false
+author:
+ - "Mark Chappell (@tremble)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Create a Transit Gateway attachment
+- community.aws.ec2_transit_gateway_vpc_attachment:
+ state: present
+ transit_gateway: 'tgw-123456789abcdef01'
+ name: AnsibleTest-1
+ subnets:
+ - subnet-00000000000000000
+ - subnet-11111111111111111
+ - subnet-22222222222222222
+ ipv6_support: True
+ purge_subnets: True
+ dns_support: True
+ appliance_mode_support: True
+ tags:
+ TestTag: changed data in Test Tag
+
+# Set sub options on a Transit Gateway attachment
+- community.aws.ec2_transit_gateway_vpc_attachment:
+ state: present
+ id: 'tgw-attach-0c0c5fd0b0f01d1c9'
+ name: AnsibleTest-1
+ ipv6_support: True
+ purge_subnets: False
+ dns_support: False
+ appliance_mode_support: True
+
+# Delete the transit gateway
+- community.aws.ec2_transit_gateway_vpc_attachment:
+ state: absent
+ id: 'tgw-attach-0c0c5fd0b0f01d1c9'
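+
+# Illustrative: create an attachment without waiting for it to become available
+- community.aws.ec2_transit_gateway_vpc_attachment:
+    state: present
+    transit_gateway: 'tgw-123456789abcdef01'
+    name: AnsibleTest-2
+    subnets:
+      - subnet-00000000000000000
+    wait: False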
+'''
+
+RETURN = '''
+transit_gateway_attachments:
+ description: The attributes of the Transit Gateway attachments.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ creation_time:
+ description:
+ - An ISO 8601 date time stamp of when the attachment was created.
+ type: str
+ returned: success
+ example: '2022-03-10T16:40:26+00:00'
+ options:
+ description:
+ - Additional VPC attachment options.
+ type: dict
+ returned: success
+ contains:
+ appliance_mode_support:
+ description:
+ - Indicates whether appliance mode support is enabled.
+ type: str
+ returned: success
+ example: 'enable'
+ dns_support:
+ description:
+ - Indicates whether DNS support is enabled.
+ type: str
+ returned: success
+ example: 'disable'
+ ipv6_support:
+ description:
+          - Indicates whether IPv6 support is enabled.
+ type: str
+ returned: success
+ example: 'disable'
+ state:
+ description:
+ - The state of the attachment.
+ type: str
+ returned: success
+ example: 'deleting'
+ subnet_ids:
+ description:
+ - The IDs of the subnets in use by the attachment.
+ type: list
+ elements: str
+ returned: success
+ example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111']
+ tags:
+ description:
+ - A dictionary representing the resource tags.
+ type: dict
+ returned: success
+ transit_gateway_attachment_id:
+ description:
+ - The ID of the attachment.
+ type: str
+ returned: success
+ example: 'tgw-attach-0c0c5fd0b0f01d1c9'
+ transit_gateway_id:
+ description:
+ - The ID of the transit gateway that the attachment is connected to.
+ type: str
+ returned: success
+ example: 'tgw-0123456789abcdef0'
+ vpc_id:
+ description:
+ - The ID of the VPC that the attachment is connected to.
+ type: str
+ returned: success
+ example: 'vpc-0123456789abcdef0'
+ vpc_owner_id:
+ description:
+ - The ID of the account that the VPC belongs to.
+ type: str
+ returned: success
+ example: '123456789012'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(type='str', required=False, default='present', choices=['absent', 'present']),
+ transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']),
+ id=dict(type='str', required=False, aliases=['attachment_id']),
+ name=dict(type='str', required=False),
+ subnets=dict(type='list', elements='str', required=False),
+ purge_subnets=dict(type='bool', required=False, default=True),
+ tags=dict(type='dict', required=False, aliases=['resource_tags']),
+ purge_tags=dict(type='bool', required=False, default=True),
+ appliance_mode_support=dict(type='bool', required=False),
+ dns_support=dict(type='bool', required=False),
+ ipv6_support=dict(type='bool', required=False),
+ wait=dict(type='bool', required=False, default=True),
+ wait_timeout=dict(type='int', required=False),
+ )
+
+ one_of = [
+ ['id', 'transit_gateway', 'name'],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=one_of,
+ )
+
+ attach_id = module.params.get('id', None)
+ tgw = module.params.get('transit_gateway', None)
+ name = module.params.get('name', None)
+ tags = module.params.get('tags', None)
+ purge_tags = module.params.get('purge_tags')
+ state = module.params.get('state')
+ subnets = module.params.get('subnets', None)
+ purge_subnets = module.params.get('purge_subnets')
+
+ # When not provided with an ID see if one exists.
+ if not attach_id:
+ search_manager = TransitGatewayVpcAttachmentManager(module=module)
+ filters = dict()
+ if tgw:
+ filters['transit-gateway-id'] = tgw
+ if name:
+ filters['tag:Name'] = name
+ if subnets:
+ vpc_id = search_manager.subnets_to_vpc(subnets)
+ filters['vpc-id'] = vpc_id
+
+        # Deleted attachments linger in the 'deleted' state for a while; ignore
+        # them so that their names can be reused.
+ filters['state'] = [
+ 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
+ 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+ ]
+ attachments = search_manager.list(filters=filters)
+ if len(attachments) > 1:
+            module.fail_json(msg='Multiple matching attachments found, provide an ID', attachments=attachments)
+        # If we find a match then we'll modify it by ID, otherwise we'll be
+        # creating a new attachment.
+ if attachments:
+ attach_id = attachments[0]['transit_gateway_attachment_id']
+
+ manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id)
+ manager.set_wait(module.params.get('wait', None))
+ manager.set_wait_timeout(module.params.get('wait_timeout', None))
+
+ if state == 'absent':
+ manager.delete()
+ else:
+ if not attach_id:
+ if not tgw:
+                module.fail_json(msg='No existing attachment found. To create a new attachment'
+                                     ' the `transit_gateway` parameter must be provided.')
+            if not subnets:
+                module.fail_json(msg='No existing attachment found. To create a new attachment'
+                                     ' the `subnets` parameter must be provided.')
+
+ # name is just a special case of tags.
+ if name:
+ new_tags = dict(Name=name)
+ if tags is None:
+ purge_tags = False
+ else:
+ new_tags.update(tags)
+ tags = new_tags
+
+ manager.set_transit_gateway(tgw)
+ manager.set_subnets(subnets, purge_subnets)
+ manager.set_tags(tags, purge_tags)
+ manager.set_dns_support(module.params.get('dns_support', None))
+ manager.set_ipv6_support(module.params.get('ipv6_support', None))
+ manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None))
+ manager.flush_changes()
+
+ results = dict(
+ changed=manager.changed,
+ attachments=[manager.updated_resource],
+ )
+ if manager.changed:
+ results['diff'] = dict(
+ before=manager.original_resource,
+ after=manager.updated_resource,
+ )
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py
new file mode 100644
index 000000000..b76b0b0f7
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway_vpc_attachment_info
+short_description: Describe AWS Transit Gateway VPC attachments
+version_added: 4.0.0
+description:
+ - Describes AWS Transit Gateway VPC Attachments.
+options:
+ id:
+ description:
+ - The ID of the Transit Gateway Attachment.
+      - Mutually exclusive with I(name) and I(filters).
+ type: str
+ required: false
+ aliases: ['attachment_id']
+ name:
+ description:
+ - The C(Name) tag of the Transit Gateway attachment.
+ type: str
+ required: false
+ filters:
+ description:
+ - A dictionary of filters to apply. Each dict item consists of a filter key and a filter value.
+ - Setting a C(tag:Name) filter will override the I(name) parameter.
+ type: dict
+ required: false
+ include_deleted:
+ description:
+ - If I(include_deleted=True), then attachments in a deleted state will
+ also be returned.
+ - Setting a C(state) filter will override the I(include_deleted) parameter.
+ type: bool
+ required: false
+ default: false
+author: "Mark Chappell (@tremble)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Describe a specific Transit Gateway attachment.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+ id: 'tgw-attach-0123456789abcdef0'
+
+# Describe all attachments attached to a transit gateway.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+ filters:
+      transit-gateway-id: tgw-0fedcba9876543210
+
+# Describe all attachments in an account.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+'''
+
+RETURN = '''
+transit_gateway_attachments:
+ description: The attributes of the Transit Gateway attachments.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ creation_time:
+ description:
+ - An ISO 8601 date time stamp of when the attachment was created.
+ type: str
+ returned: success
+ example: '2022-03-10T16:40:26+00:00'
+ options:
+ description:
+ - Additional VPC attachment options.
+ type: dict
+ returned: success
+ contains:
+ appliance_mode_support:
+ description:
+ - Indicates whether appliance mode support is enabled.
+ type: str
+ returned: success
+ example: 'enable'
+ dns_support:
+ description:
+ - Indicates whether DNS support is enabled.
+ type: str
+ returned: success
+ example: 'disable'
+ ipv6_support:
+ description:
+          - Indicates whether IPv6 support is enabled.
+ type: str
+ returned: success
+ example: 'disable'
+ state:
+ description:
+ - The state of the attachment.
+ type: str
+ returned: success
+ example: 'deleting'
+ subnet_ids:
+ description:
+ - The IDs of the subnets in use by the attachment.
+ type: list
+ elements: str
+ returned: success
+ example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111']
+ tags:
+ description:
+ - A dictionary representing the resource tags.
+ type: dict
+ returned: success
+ transit_gateway_attachment_id:
+ description:
+ - The ID of the attachment.
+ type: str
+ returned: success
+ example: 'tgw-attach-0c0c5fd0b0f01d1c9'
+ transit_gateway_id:
+ description:
+ - The ID of the transit gateway that the attachment is connected to.
+ type: str
+ returned: success
+ example: 'tgw-0123456789abcdef0'
+ vpc_id:
+ description:
+ - The ID of the VPC that the attachment is connected to.
+ type: str
+ returned: success
+ example: 'vpc-0123456789abcdef0'
+ vpc_owner_id:
+ description:
+ - The ID of the account that the VPC belongs to.
+ type: str
+ returned: success
+ example: '123456789012'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
+
+
+def main():
+
+ argument_spec = dict(
+ id=dict(type='str', required=False, aliases=['attachment_id']),
+ name=dict(type='str', required=False),
+ filters=dict(type='dict', required=False),
+ include_deleted=dict(type='bool', required=False, default=False)
+ )
+
+ mutually_exclusive = [
+ ['id', 'name'],
+ ['id', 'filters'],
+ ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=mutually_exclusive,
+    )
+
+ name = module.params.get('name', None)
+ id = module.params.get('id', None)
+ opt_filters = module.params.get('filters', None)
+
+ search_manager = TransitGatewayVpcAttachmentManager(module=module)
+ filters = dict()
+
+ if name:
+ filters['tag:Name'] = name
+
+ if not module.params.get('include_deleted'):
+        # Deleted attachments linger in the 'deleted' state for a while; ignore
+        # them so that their names can be reused.
+ filters['state'] = [
+ 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
+ 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+ ]
+
+ if opt_filters:
+ filters.update(opt_filters)
+
+ attachments = search_manager.list(filters=filters, id=id)
+
+ module.exit_json(changed=False, attachments=attachments, filters=filters)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
new file mode 100644
index 000000000..dbcf15b12
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_egress_igw
+version_added: 1.0.0
+short_description: Manage an AWS VPC Egress Only Internet gateway
+description:
+  - Manage an AWS VPC Egress Only Internet gateway.
+author: Daniel Shepherd (@shepdelacreme)
+options:
+ vpc_id:
+ description:
+      - The VPC ID for the VPC that this Egress Only Internet Gateway should be attached to.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete the EIGW.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{ eigw.gateway_id }} for use in setting up NATs etc.
+- community.aws.ec2_vpc_egress_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ register: eigw
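+
+# Remove the Egress Only Internet Gateway from the VPC again
+# (illustrative counterpart of the example above):
+- community.aws.ec2_vpc_egress_igw:
+    vpc_id: vpc-abcdefgh
+    state: absent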
+
+'''
+
+RETURN = '''
+gateway_id:
+  description: The ID of the Egress Only Internet Gateway, or C(null) if none exists.
+ returned: always
+ type: str
+ sample: eigw-0e00cf111ba5bc11e
+vpc_id:
+  description: The ID of the VPC that the gateway is attached to or detached from.
+ returned: always
+ type: str
+ sample: vpc-012345678
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def delete_eigw(module, connection, eigw_id):
+ """
+ Delete EIGW.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ eigw_id : ID of the EIGW to delete
+ """
+ changed = False
+
+ try:
+ response = connection.delete_egress_only_internet_gateway(
+ aws_retry=True,
+ DryRun=module.check_mode,
+ EgressOnlyInternetGatewayId=eigw_id)
+ except is_boto3_error_code('DryRunOperation'):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.params.get('vpc_id')))
+
+ if not module.check_mode:
+ changed = response.get('ReturnCode', False)
+
+ return changed
+
+
+def create_eigw(module, connection, vpc_id):
+ """
+ Create EIGW.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ vpc_id : ID of the VPC we are operating on
+ """
+ gateway_id = None
+ changed = False
+
+ try:
+ response = connection.create_egress_only_internet_gateway(
+ aws_retry=True,
+ DryRun=module.check_mode,
+ VpcId=vpc_id)
+ except is_boto3_error_code('DryRunOperation'):
+ # When boto3 method is run with DryRun=True it returns an error on success
+ # We need to catch the error and return something valid
+ changed = True
+ except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
+
+ if not module.check_mode:
+ gateway = response.get('EgressOnlyInternetGateway', {})
+ state = gateway.get('Attachments', [{}])[0].get('State')
+ gateway_id = gateway.get('EgressOnlyInternetGatewayId')
+
+ if gateway_id and state in ('attached', 'attaching'):
+ changed = True
+ else:
+ # EIGW gave back a bad attachment state or an invalid response so we error out
+ module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id),
+ **camel_dict_to_snake_dict(response))
+
+ return changed, gateway_id
+
+
+def describe_eigws(module, connection, vpc_id):
+ """
+ Describe EIGWs.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ vpc_id : ID of the VPC we are operating on
+ """
+ gateway_id = None
+
+ try:
+ response = connection.describe_egress_only_internet_gateways(
+ aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways")
+
+ for eigw in response.get('EgressOnlyInternetGateways', []):
+ for attachment in eigw.get('Attachments', []):
+ if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'):
+ gateway_id = eigw.get('EgressOnlyInternetGatewayId')
+
+ return gateway_id
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+
+ vpc_id = module.params.get('vpc_id')
+ state = module.params.get('state')
+
+ eigw_id = describe_eigws(module, connection, vpc_id)
+
+ result = dict(gateway_id=eigw_id, vpc_id=vpc_id)
+ changed = False
+
+ if state == 'present' and not eigw_id:
+ changed, result['gateway_id'] = create_eigw(module, connection, vpc_id)
+ elif state == 'absent' and eigw_id:
+ changed = delete_eigw(module, connection, eigw_id)
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
new file mode 100644
index 000000000..e11df3de5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nacl
+short_description: Create and delete Network ACLs
+version_added: 1.0.0
+description:
+  - Read the AWS documentation for Network ACLs
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+options:
+ name:
+ description:
+ - Tagged name identifying a network ACL.
+      - Exactly one of I(name) or I(nacl_id) is required.
+ required: false
+ type: str
+ nacl_id:
+ description:
+      - The NACL ID identifying a network ACL.
+      - Exactly one of I(name) or I(nacl_id) is required.
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+      - Required when I(state=present).
+ required: false
+ type: str
+ subnets:
+ description:
+ - The list of subnets that should be associated with the network ACL.
+      - Must be specified as a list.
+ - Each subnet can be specified as subnet ID, or its tagged name.
+ required: false
+ type: list
+ elements: str
+ default: []
+ egress:
+ description:
+      - A list of rules for outgoing traffic. Each rule must be specified as a list.
+        Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']),
+        the rule action ('allow' or 'deny'), the CIDR of the IPv4 or IPv6 network range to allow or deny,
+        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+        See examples.
+ default: []
+ required: false
+ type: list
+ elements: list
+ ingress:
+ description:
+      - List of rules for incoming traffic. Each rule must be specified as a list.
+        Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']),
+        the rule action ('allow' or 'deny'), the CIDR of the IPv4 or IPv6 network range to allow or deny,
+        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+        See examples.
+ default: []
+ required: false
+ type: list
+ elements: list
+ state:
+ description:
+      - If C(present), creates or modifies the NACL.
+      - If C(absent), deletes the NACL and reassociates its subnets with the default NACL.
+ required: false
+ type: str
+ choices: ['present', 'absent']
+ default: present
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+notes:
+ - Support for I(purge_tags) was added in release 4.0.0.
+'''
+
+EXAMPLES = r'''
+
+# Complete example to create and delete a network ACL
+# that allows SSH, HTTP and ICMP in, and all traffic out.
+- name: "Create and associate production DMZ network ACL with DMZ subnets"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets: ['prod-dmz-1', 'prod-dmz-2']
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ ingress:
+ # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+ # port from, port to
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
+ egress:
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ - [105, 'all', 'allow', '::/0', null, null, null, null]
+ state: 'present'
+
+- name: "Remove the ingress and egress rules - defaults to deny all"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets:
+ - prod-dmz-1
+ - prod-dmz-2
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ state: present
+
+- name: "Remove the NACL subnet associations and tags"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: 'vpc-12345678'
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ state: present
+
+- name: "Delete nacl and subnet associations"
+ community.aws.ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ state: absent
+
+- name: "Delete nacl by its id"
+ community.aws.ec2_vpc_nacl:
+ nacl_id: acl-33b4ee5b
+ state: absent
+'''
+
+RETURN = r'''
+task:
+  description: The result of the create or delete action.
+ returned: success
+ type: dict
+nacl_id:
+  description: The ID of the NACL (when creating or updating an ACL).
+ returned: success
+ type: str
+ sample: acl-123456789abcdef01
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58}
+
+
+# Utility methods
+def icmp_present(entry):
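+    # A 6-element rule omits the port-range fields and carries its ICMP type
+    # and code in positions 4 and 5; protocol numbers 1 (ICMP) and 58 (ICMPv6)
+    # imply the same layout.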
+    if (len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp']) or entry[1] in [1, 58]:
+ return True
+
+
+def subnets_removed(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnet_ids if subnet not in subnets]
+
+
+def subnets_added(nacl_id, subnets, client, module):
+ results = find_acl_by_id(nacl_id, client, module)
+ associations = results['NetworkAcls'][0]['Associations']
+ subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ return [subnet for subnet in subnets if subnet not in subnet_ids]
+
+
+def subnets_changed(nacl, client, module):
+ changed = False
+ vpc_id = module.params.get('vpc_id')
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ subnets = subnets_to_associate(nacl, client, module)
+ if not subnets:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
+ if subnets:
+ replace_network_acl_association(default_nacl_id, subnets, client, module)
+ changed = True
+ return changed
+ changed = False
+ return changed
+ subs_added = subnets_added(nacl_id, subnets, client, module)
+ if subs_added:
+ replace_network_acl_association(nacl_id, subs_added, client, module)
+ changed = True
+ subs_removed = subnets_removed(nacl_id, subnets, client, module)
+ if subs_removed:
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+ replace_network_acl_association(default_nacl_id, subs_removed, client, module)
+ changed = True
+ return changed
+
+
+def nacls_changed(nacl, client, module):
+ changed = False
+ params = dict()
+ params['egress'] = module.params.get('egress')
+ params['ingress'] = module.params.get('ingress')
+
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ nacl = describe_network_acl(client, module)
+ entries = nacl['NetworkAcls'][0]['Entries']
+ egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
+ ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
+ if rules_changed(egress, params['egress'], True, nacl_id, client, module):
+ changed = True
+ if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
+ changed = True
+ return changed
+
+
+def tags_changed(nacl_id, client, module):
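+    # The C(Name) tag is driven by the name parameter; fold it into the
+    # requested tags before reconciling them against the NACL's current tags.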
+ tags = module.params.get('tags')
+ name = module.params.get('name')
+ purge_tags = module.params.get('purge_tags')
+
+ if name is None and tags is None:
+ return False
+
+ if module.params.get('tags') is None:
+ # Only purge tags if tags is explicitly set to {} and purge_tags is True
+ purge_tags = False
+
+ new_tags = dict()
+ if module.params.get('name') is not None:
+ new_tags['Name'] = module.params.get('name')
+ new_tags.update(module.params.get('tags') or {})
+
+ return ensure_ec2_tags(client, module, nacl_id, tags=new_tags,
+ purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound'])
+
+
+def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
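+    # Build boto3-style entry dicts from the requested rules, then diff against
+    # the entries AWS returned: delete entries that are no longer requested and
+    # create any that are missing.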
+ changed = False
+ rules = list()
+ for entry in param_rules:
+ rules.append(process_rule_entry(entry, Egress))
+ if rules == aws_rules:
+ return changed
+ else:
+ removed_rules = [x for x in aws_rules if x not in rules]
+ if removed_rules:
+ params = dict()
+ for rule in removed_rules:
+ params['NetworkAclId'] = nacl_id
+ params['RuleNumber'] = rule['RuleNumber']
+ params['Egress'] = Egress
+ delete_network_acl_entry(params, client, module)
+ changed = True
+ added_rules = [x for x in rules if x not in aws_rules]
+ if added_rules:
+ for rule in added_rules:
+ rule['NetworkAclId'] = nacl_id
+ create_network_acl_entry(rule, client, module)
+ changed = True
+ return changed
+
+
+def is_ipv6(cidr):
+ return ':' in cidr
+
+
+def process_rule_entry(entry, Egress):
+ params = dict()
+ params['RuleNumber'] = entry[0]
+ params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
+ params['RuleAction'] = entry[2]
+ params['Egress'] = Egress
+ if is_ipv6(entry[3]):
+ params['Ipv6CidrBlock'] = entry[3]
+ else:
+ params['CidrBlock'] = entry[3]
+ if icmp_present(entry):
+ params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
+ else:
+ if entry[6] or entry[7]:
+ params['PortRange'] = {"From": entry[6], 'To': entry[7]}
+ return params
+
+
+def restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ if assoc_ids:
+ params = dict()
+ params['NetworkAclId'] = default_nacl_id[0]
+ for assoc_id in assoc_ids:
+ params['AssociationId'] = assoc_id
+ restore_default_acl_association(params, client, module)
+ return True
+
+
+def construct_acl_entries(nacl, client, module):
+ for entry in module.params.get('ingress'):
+ params = process_rule_entry(entry, Egress=False)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+ for rule in module.params.get('egress'):
+ params = process_rule_entry(rule, Egress=True)
+ params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ create_network_acl_entry(params, client, module)
+
+
+# Module invocations
+def setup_network_acl(client, module):
+ changed = False
+ nacl = describe_network_acl(client, module)
+ if not nacl['NetworkAcls']:
+ tags = {}
+ if module.params.get('name'):
+ tags['Name'] = module.params.get('name')
+ tags.update(module.params.get('tags') or {})
+ nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags)
+ nacl_id = nacl['NetworkAcl']['NetworkAclId']
+ subnets = subnets_to_associate(nacl, client, module)
+ replace_network_acl_association(nacl_id, subnets, client, module)
+ construct_acl_entries(nacl, client, module)
+ changed = True
+ return changed, nacl['NetworkAcl']['NetworkAclId']
+ else:
+ changed = False
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ changed |= subnets_changed(nacl, client, module)
+ changed |= nacls_changed(nacl, client, module)
+ changed |= tags_changed(nacl_id, client, module)
+ return changed, nacl_id
+
+
+def remove_network_acl(client, module):
+ changed = False
+ result = dict()
+ nacl = describe_network_acl(client, module)
+ if nacl['NetworkAcls']:
+ nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ vpc_id = nacl['NetworkAcls'][0]['VpcId']
+ associations = nacl['NetworkAcls'][0]['Associations']
+ assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
+ default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
+ if not default_nacl_id:
+ result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
+ return changed, result
+ if restore_default_associations(assoc_ids, default_nacl_id, client, module):
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ if not assoc_ids:
+ delete_network_acl(nacl_id, client, module)
+ changed = True
+ result[nacl_id] = "Successfully deleted"
+ return changed, result
+ return changed, result
+
+
+# Boto3 client methods
+@AWSRetry.jittered_backoff()
+def _create_network_acl(client, *args, **kwargs):
+ return client.create_network_acl(*args, **kwargs)
+
+
+def create_network_acl(vpc_id, client, module, tags):
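+    # In check mode nothing is created; hand back a placeholder NetworkAclId so
+    # the caller can still report a result.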
+ params = dict(VpcId=vpc_id)
+ if tags:
+ params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl'])
+ try:
+ if module.check_mode:
+ nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
+ else:
+ nacl = _create_network_acl(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return nacl
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _create_network_acl_entry(client, *args, **kwargs):
+ return client.create_network_acl_entry(*args, **kwargs)
+
+
+def create_network_acl_entry(params, client, module):
+ try:
+ if not module.check_mode:
+ _create_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _delete_network_acl(client, *args, **kwargs):
+ return client.delete_network_acl(*args, **kwargs)
+
+
+def delete_network_acl(nacl_id, client, module):
+ try:
+ if not module.check_mode:
+ _delete_network_acl(client, NetworkAclId=nacl_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _delete_network_acl_entry(client, *args, **kwargs):
+ return client.delete_network_acl_entry(*args, **kwargs)
+
+
+def delete_network_acl_entry(params, client, module):
+ try:
+ if not module.check_mode:
+ _delete_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _describe_network_acls(client, **kwargs):
+ return client.describe_network_acls(**kwargs)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _describe_network_acls_retry_missing(client, **kwargs):
+ return client.describe_network_acls(**kwargs)
+
+
+def describe_acl_associations(subnets, client, module):
+ if not subnets:
+ return []
+ try:
+ results = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'association.subnet-id', 'Values': subnets}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ associations = results['NetworkAcls'][0]['Associations']
+ return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
+
+
+def describe_network_acl(client, module):
+ try:
+ if module.params.get('nacl_id'):
+ nacl = _describe_network_acls(client, Filters=[
+ {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
+ ])
+ else:
+ nacl = _describe_network_acls(client, Filters=[
+ {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return nacl
+
+
+def find_acl_by_id(nacl_id, client, module):
+ try:
+ return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def find_default_vpc_nacl(vpc_id, client, module):
+ try:
+ response = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'vpc-id', 'Values': [vpc_id]}])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ nacls = response['NetworkAcls']
+ return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
+
+
+def find_subnet_ids_by_nacl_id(nacl_id, client, module):
+ try:
+ results = _describe_network_acls_retry_missing(client, Filters=[
+ {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
+ ])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ if results['NetworkAcls']:
+ associations = results['NetworkAcls'][0]['Associations']
+ return [s['SubnetId'] for s in associations if s['SubnetId']]
+ else:
+ return []
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _replace_network_acl_association(client, *args, **kwargs):
+ return client.replace_network_acl_association(*args, **kwargs)
+
+
+def replace_network_acl_association(nacl_id, subnets, client, module):
+ params = dict()
+ params['NetworkAclId'] = nacl_id
+ for association in describe_acl_associations(subnets, client, module):
+ params['AssociationId'] = association
+ try:
+ if not module.check_mode:
+ _replace_network_acl_association(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+def _replace_network_acl_entry(client, *args, **kwargs):
+ return client.replace_network_acl_entry(*args, **kwargs)
+
+
+def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
+ for entry in entries:
+ params = entry
+ params['NetworkAclId'] = nacl_id
+ try:
+ if not module.check_mode:
+ _replace_network_acl_entry(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def restore_default_acl_association(params, client, module):
+ try:
+ if not module.check_mode:
+ _replace_network_acl_association(client, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _describe_subnets(client, *args, **kwargs):
+ return client.describe_subnets(*args, **kwargs)
+
+
+def subnets_to_associate(nacl, client, module):
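+    # Subnets may be given either as subnet IDs or as tagged names; resolve the
+    # IDs first, then fall back to a tag:Name lookup for anything unresolved.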
+ params = list(module.params.get('subnets'))
+ if not params:
+ return []
+ all_found = []
+ if any(x.startswith("subnet-") for x in params):
+ try:
+ subnets = _describe_subnets(client, Filters=[
+ {'Name': 'subnet-id', 'Values': params}])
+ all_found.extend(subnets.get('Subnets', []))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ if len(params) != len(all_found):
+ try:
+ subnets = _describe_subnets(client, Filters=[
+ {'Name': 'tag:Name', 'Values': params}])
+ all_found.extend(subnets.get('Subnets', []))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId')))
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ name=dict(),
+ nacl_id=dict(),
+ subnets=dict(required=False, type='list', default=list(), elements='str'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(required=False, type='bool', default=True),
+ ingress=dict(required=False, type='list', default=list(), elements='list'),
+ egress=dict(required=False, type='list', default=list(), elements='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'nacl_id']],
+ required_if=[['state', 'present', ['vpc_id']]])
+
+ state = module.params.get('state').lower()
+
+ client = module.client('ec2')
+
+ invocations = {
+ "present": setup_network_acl,
+ "absent": remove_network_acl
+ }
+ (changed, results) = invocations[state](client, module)
+ module.exit_json(changed=changed, nacl_id=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
new file mode 100644
index 000000000..b85c94236
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_nacl_info
+version_added: 1.0.0
+short_description: Gather information about Network ACLs in an AWS VPC
+description:
+  - Gather information about Network ACLs in an AWS VPC.
+author: "Brad Davidson (@brandond)"
+options:
+ nacl_ids:
+ description:
+ - A list of Network ACL IDs to retrieve information about.
+ required: false
+ default: []
+ aliases: [nacl_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all Network ACLs.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all Network ACLs:
+- name: Get All NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ register: all_nacls
+
+# Retrieve default Network ACLs:
+- name: Get Default NACLs
+ community.aws.ec2_vpc_nacl_info:
+ region: us-west-2
+ filters:
+ 'default': 'true'
+ register: default_nacls
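+
+# Retrieve NACLs associated with a specific VPC (illustrative; any filter
+# supported by the DescribeNetworkAcls API can be used the same way):
+- name: Get NACLs for one VPC
+  community.aws.ec2_vpc_nacl_info:
+    region: us-west-2
+    filters:
+      vpc-id: vpc-abcdefgh
+  register: vpc_nacls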
+'''
+
+RETURN = r'''
+nacls:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ nacl_id:
+ description: The ID of the Network Access Control List.
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC that the NACL is attached to.
+ returned: always
+ type: str
+ is_default:
+ description: True if the NACL is the default for its VPC.
+ returned: always
+ type: bool
+ tags:
+ description: A dict of tags associated with the NACL.
+ returned: always
+ type: dict
+ subnets:
+ description: A list of subnet IDs that are associated with the NACL.
+ returned: always
+ type: list
+ elements: str
+ ingress:
+ description:
+ - A list of NACL ingress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]]
+ egress:
+ description:
+ - A list of NACL egress rules with the following format.
+ - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
+ returned: always
+ type: list
+ elements: list
+ sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]]
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+
+
+def list_ec2_vpc_nacls(connection, module):
+
+ nacl_ids = module.params.get("nacl_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ if nacl_ids is None:
+ nacl_ids = []
+
+ try:
+ nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
+ except is_boto3_error_code('InvalidNetworkAclID.NotFound'):
+ module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+
+    # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_nacls = []
+ for nacl in nacls['NetworkAcls']:
+ snaked_nacls.append(camel_dict_to_snake_dict(nacl))
+
+    # Turn the boto3 result into an Ansible-friendly tag dictionary
+ for nacl in snaked_nacls:
+ if 'tags' in nacl:
+ nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
+ if 'entries' in nacl:
+ nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and entry['egress']]
+ nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
+ if entry['rule_number'] < 32767 and not entry['egress']]
+ del nacl['entries']
+ if 'associations' in nacl:
+ nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
+ del nacl['associations']
+ if 'network_acl_id' in nacl:
+ nacl['nacl_id'] = nacl['network_acl_id']
+ del nacl['network_acl_id']
+
+ module.exit_json(nacls=snaked_nacls)
+
+
+def nacl_entry_to_list(entry):
+
+ # entry list format
+ # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
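+    # e.g. an entry allowing SSH from anywhere becomes:
+    #   [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22]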
+ elist = []
+
+ elist.append(entry['rule_number'])
+
+ if entry.get('protocol') in PROTOCOL_NAMES:
+ elist.append(PROTOCOL_NAMES[entry['protocol']])
+ else:
+ elist.append(entry.get('protocol'))
+
+ elist.append(entry['rule_action'])
+
+ if entry.get('cidr_block'):
+ elist.append(entry['cidr_block'])
+ elif entry.get('ipv6_cidr_block'):
+ elist.append(entry['ipv6_cidr_block'])
+ else:
+ elist.append(None)
+
+ elist = elist + [None, None, None, None]
+
+ if entry['protocol'] in ('1', '58'):
+ elist[4] = entry.get('icmp_type_code', {}).get('type')
+ elist[5] = entry.get('icmp_type_code', {}).get('code')
+
+ if entry['protocol'] not in ('1', '6', '17', '58'):
+ elist[6] = 0
+ elist[7] = 65535
+ elif 'port_range' in entry:
+ elist[6] = entry['port_range']['from']
+ elist[7] = entry['port_range']['to']
+
+ return elist
+
+
+def main():
+
+ argument_spec = dict(
+ nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'),
+ filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_vpc_nacls(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
new file mode 100644
index 000000000..f23ffae19
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
@@ -0,0 +1,590 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_peer
+short_description: Create, delete, accept, and reject VPC peering connections between two VPCs
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for VPC Peering Connections
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html).
+options:
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+ required: false
+ type: str
+ peering_id:
+ description:
+ - Peering connection id.
+ required: false
+ type: str
+ peer_region:
+ description:
+ - Region of the accepting VPC.
+ required: false
+ type: str
+ peer_vpc_id:
+ description:
+ - VPC id of the accepting VPC.
+ required: false
+ type: str
+ peer_owner_id:
+ description:
+ - The AWS account number for cross account peering.
+ required: false
+ type: str
+ state:
+ description:
+ - Create, delete, accept, or reject a peering connection.
+ required: false
+ default: present
+ choices: ['present', 'absent', 'accept', 'reject']
+ type: str
+ wait:
+ description:
+ - Wait for peering state changes to complete.
+ required: false
+ default: false
+ type: bool
+notes:
+ - Support for I(purge_tags) was added in release 2.0.0.
+author:
+ - Mike Mochan (@mmochan)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Complete example to create and accept a local peering connection.
+- name: Create local account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept local VPC peering request
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: action_peer
+
+# Complete example to delete a local peering connection.
+- name: Create local account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Delete a local VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: absent
+ register: vpc_peer
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789012
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from remote account
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and accept an intra-region peering connection.
+- name: Create intra-region VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: us-east-1
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ peer_region: us-west-2
+ state: present
+ tags:
+ Name: Peering connection for us-east-1 VPC to us-west-2 VPC
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from peer region
+ community.aws.ec2_vpc_peer:
+ region: us-west-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and reject a local peering connection.
+- name: Create local account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a local VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: reject
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789012
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept a cross account VPC peering connection request
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+
+# Complete example to create and reject a cross account peering connection.
+- name: Create cross account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789012
+ state: present
+ tags:
+ Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a cross account VPC peering connection
+ community.aws.ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: reject
+
+'''
+RETURN = '''
+peering_id:
+ description: The id of the VPC peering connection created/deleted.
+ returned: always
+ type: str
+ sample: pcx-034223d7c0aec3cde
+vpc_peering_connection:
+ description: The details of the VPC peering connection as returned by Boto3 (snake cased).
+ returned: success
+ type: complex
+ contains:
+ accepter_vpc_info:
+ description: Information about the VPC which accepted the connection.
+ returned: success
+ type: complex
+ contains:
+ cidr_block:
+ description: The primary CIDR for the VPC.
+ returned: when connection is in the accepted state.
+ type: str
+ example: '10.10.10.0/23'
+ cidr_block_set:
+ description: A list of all CIDRs for the VPC.
+ returned: when connection is in the accepted state.
+ type: complex
+ contains:
+ cidr_block:
+ description: A CIDR block used by the VPC.
+ returned: success
+ type: str
+ example: '10.10.10.0/23'
+ owner_id:
+ description: The AWS account that owns the VPC.
+ returned: success
+ type: str
+ example: 123456789012
+ peering_options:
+ description: Additional peering configuration.
+ returned: when connection is in the accepted state.
+ type: dict
+ contains:
+ allow_dns_resolution_from_remote_vpc:
+ description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
+ returned: success
+ type: bool
+ allow_egress_from_local_classic_link_to_remote_vpc:
+ description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ allow_egress_from_local_vpc_to_remote_classic_link:
+ description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ region:
+ description: The AWS region that the VPC is in.
+ returned: success
+ type: str
+ example: us-east-1
+ vpc_id:
+ description: The ID of the VPC.
+ returned: success
+ type: str
+ example: vpc-0123456789abcdef0
+ requester_vpc_info:
+ description: Information about the VPC which requested the connection.
+ returned: success
+ type: complex
+ contains:
+ cidr_block:
+ description: The primary CIDR for the VPC.
+ returned: when connection is not in the deleted state.
+ type: str
+ example: '10.10.10.0/23'
+ cidr_block_set:
+ description: A list of all CIDRs for the VPC.
+ returned: when connection is not in the deleted state.
+ type: complex
+ contains:
+ cidr_block:
+ description: A CIDR block used by the VPC.
+ returned: success
+ type: str
+ example: '10.10.10.0/23'
+ owner_id:
+ description: The AWS account that owns the VPC.
+ returned: success
+ type: str
+ example: 123456789012
+ peering_options:
+ description: Additional peering configuration.
+ returned: when connection is not in the deleted state.
+ type: dict
+ contains:
+ allow_dns_resolution_from_remote_vpc:
+ description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
+ returned: success
+ type: bool
+ allow_egress_from_local_classic_link_to_remote_vpc:
+ description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ allow_egress_from_local_vpc_to_remote_classic_link:
+ description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ region:
+ description: The AWS region that the VPC is in.
+ returned: success
+ type: str
+ example: us-east-1
+ vpc_id:
+ description: The ID of the VPC.
+ returned: success
+ type: str
+ example: vpc-0123456789abcdef0
+ status:
+ description: Details of the current status of the connection.
+ returned: success
+ type: complex
+ contains:
+ code:
+ description: A short code describing the status of the connection.
+ returned: success
+ type: str
+ example: active
+ message:
+ description: Additional information about the status of the connection.
+ returned: success
+ type: str
+ example: Pending Acceptance by 123456789012
+ tags:
+ description: Tags applied to the connection.
+ returned: success
+ type: dict
+ vpc_peering_connection_id:
+ description: The ID of the VPC peering connection.
+ returned: success
+ type: str
+ example: "pcx-0123456789abcdef0"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+
+
+def wait_for_state(client, module, state, pcx_id):
+ waiter = client.get_waiter('vpc_peering_connection_exists')
+ peer_filter = {
+ 'vpc-peering-connection-id': pcx_id,
+ 'status-code': state,
+ }
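+ # ansible_dict_to_boto3_filter_list converts the dict above into the boto3
+ # Filters shape, e.g. [{'Name': 'status-code', 'Values': ['active']}, ...]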
+ try:
+ waiter.wait(
+ Filters=ansible_dict_to_boto3_filter_list(peer_filter)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Failed to wait for state change")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to describe peering connection while waiting for state to change")
+
+
+def describe_peering_connections(params, client):
+ peer_filter = {
+ 'requester-vpc-info.vpc-id': params['VpcId'],
+ 'accepter-vpc-info.vpc-id': params['PeerVpcId'],
+ }
+ result = client.describe_vpc_peering_connections(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(peer_filter),
+ )
+ if result['VpcPeeringConnections'] == []:
+ # Try again with the VPC/Peer relationship reversed
+ peer_filter = {
+ 'requester-vpc-info.vpc-id': params['PeerVpcId'],
+ 'accepter-vpc-info.vpc-id': params['VpcId'],
+ }
+ result = client.describe_vpc_peering_connections(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(peer_filter),
+ )
+
+ return result
+
+
+def is_active(peering_conn):
+ return peering_conn['Status']['Code'] == 'active'
+
+
+def is_pending(peering_conn):
+ return peering_conn['Status']['Code'] == 'pending-acceptance'
+
+
+def create_peer_connection(client, module):
+ changed = False
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+ params['PeerVpcId'] = module.params.get('peer_vpc_id')
+ if module.params.get('peer_region'):
+ params['PeerRegion'] = module.params.get('peer_region')
+ if module.params.get('peer_owner_id'):
+ params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ peering_conns = describe_peering_connections(params, client)
+ for peering_conn in peering_conns['VpcPeeringConnections']:
+ pcx_id = peering_conn['VpcPeeringConnectionId']
+ if ensure_ec2_tags(client, module, pcx_id,
+ purge_tags=module.params.get('purge_tags'),
+ tags=module.params.get('tags'),
+ ):
+ changed = True
+ if is_active(peering_conn):
+ return (changed, peering_conn)
+ if is_pending(peering_conn):
+ return (changed, peering_conn)
+ try:
+ peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params)
+ pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
+ if module.params.get('tags'):
+ # Once the minimum botocore version is bumped to > 1.17.24
+ # (hopefully community.aws 3.0.0) we can add the tags to the
+ # creation parameters
+ add_ec2_tags(client, module, pcx_id, module.params.get('tags'),
+ retry_codes=['InvalidVpcPeeringConnectionID.NotFound'])
+ if module.params.get('wait'):
+ wait_for_state(client, module, 'pending-acceptance', pcx_id)
+ changed = True
+ return (changed, peering_conn['VpcPeeringConnection'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_peer_connection(client, module):
+ pcx_id = module.params.get('peering_id')
+ if pcx_id:
+ peering_conn = get_peering_connection_by_id(pcx_id, client, module)
+ else:
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+ params['PeerVpcId'] = module.params.get('peer_vpc_id')
+ params['PeerRegion'] = module.params.get('peer_region')
+ if module.params.get('peer_owner_id'):
+ params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0]
+
+ if not peering_conn:
+ module.exit_json(changed=False)
+ else:
+ pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId']
+
+ if peering_conn['Status']['Code'] == 'deleted':
+ module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id)
+ if peering_conn['Status']['Code'] == 'rejected':
+ module.exit_json(
+ msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS',
+ changed=False,
+ peering_id=pcx_id
+ )
+
+ try:
+ params = dict()
+ params['VpcPeeringConnectionId'] = pcx_id
+ client.delete_vpc_peering_connection(aws_retry=True, **params)
+ if module.params.get('wait'):
+ wait_for_state(client, module, 'deleted', pcx_id)
+ module.exit_json(changed=True, peering_id=pcx_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+
+
+def get_peering_connection_by_id(peering_id, client, module):
+ params = dict()
+ params['VpcPeeringConnectionIds'] = [peering_id]
+ try:
+ vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params)
+ return vpc_peering_connection['VpcPeeringConnections'][0]
+ except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e:
+ module.fail_json_aws(e, msg='Malformed connection ID')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Error while describing peering connection by peering_id')
+
+
+def accept_reject(state, client, module):
+ changed = False
+ params = dict()
+ peering_id = module.params.get('peering_id')
+ params['VpcPeeringConnectionId'] = peering_id
+ vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module)
+ peering_status = vpc_peering_connection['Status']['Code']
+
+ if peering_status not in ['active', 'rejected']:
+ try:
+ if state == 'accept':
+ client.accept_vpc_peering_connection(aws_retry=True, **params)
+ target_state = 'active'
+ else:
+ client.reject_vpc_peering_connection(aws_retry=True, **params)
+ target_state = 'rejected'
+ if module.params.get('tags'):
+ add_ec2_tags(client, module, peering_id, module.params.get('tags'),
+ retry_codes=['InvalidVpcPeeringConnectionID.NotFound'])
+ changed = True
+ if module.params.get('wait'):
+ wait_for_state(client, module, target_state, peering_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=str(e))
+ if ensure_ec2_tags(client, module, peering_id,
+ purge_tags=module.params.get('purge_tags'),
+ tags=module.params.get('tags'),
+ ):
+ changed = True
+
+ # Reload peering connection info to return the latest state/params
+ vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module)
+ return (changed, vpc_peering_connection)
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ peer_vpc_id=dict(),
+ peer_region=dict(),
+ peering_id=dict(),
+ peer_owner_id=dict(),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
+ wait=dict(default=False, type='bool'),
+ )
+ required_if = [
+ ('state', 'present', ['vpc_id', 'peer_vpc_id']),
+ ('state', 'accept', ['peering_id']),
+ ('state', 'reject', ['peering_id'])
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)
+
+ state = module.params.get('state')
+ peering_id = module.params.get('peering_id')
+ vpc_id = module.params.get('vpc_id')
+ peer_vpc_id = module.params.get('peer_vpc_id')
+
+ try:
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ (changed, results) = create_peer_connection(client, module)
+ elif state == 'absent':
+ if not peering_id and (not vpc_id or not peer_vpc_id):
+ module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
+
+ remove_peer_connection(client, module)
+ else:
+ (changed, results) = accept_reject(state, client, module)
+
+ formatted_results = camel_dict_to_snake_dict(results)
+ # Turn the resource tags from boto3 into an ansible friendly tag dictionary
+ formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', []))
+
+ module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
new file mode 100644
index 000000000..680fa3b68
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_peering_info
+short_description: Retrieves AWS VPC peering connection details
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC peering connections.
+options:
+ peer_connection_ids:
+ description:
+ - List of specific VPC peer IDs to get details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
+ for possible filters.
+ type: dict
+ default: {}
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all VPC Peers
+- name: List all VPC peers
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_vpc_peers.result }}"
+
+- name: Get details on specific VPC peer
+ community.aws.ec2_vpc_peering_info:
+ peer_connection_ids:
+ - pcx-12345678
+ - pcx-87654321
+ region: ap-southeast-2
+ register: all_vpc_peers
+
+- name: Get all VPC peers with specific filters
+ community.aws.ec2_vpc_peering_info:
+ region: ap-southeast-2
+ filters:
+ status-code: ['pending-acceptance']
+ register: pending_vpc_peers
+'''
+
+RETURN = r'''
+vpc_peering_connections:
+ description: Details of the matching VPC peering connections.
+ returned: success
+ type: list
+ contains:
+ accepter_vpc_info:
+ description: Information about the VPC which accepted the connection.
+ returned: success
+ type: complex
+ contains:
+ cidr_block:
+ description: The primary CIDR for the VPC.
+ returned: when connection is in the accepted state.
+ type: str
+ example: '10.10.10.0/23'
+ cidr_block_set:
+ description: A list of all CIDRs for the VPC.
+ returned: when connection is in the accepted state.
+ type: complex
+ contains:
+ cidr_block:
+ description: A CIDR block used by the VPC.
+ returned: success
+ type: str
+ example: '10.10.10.0/23'
+ owner_id:
+ description: The AWS account that owns the VPC.
+ returned: success
+ type: str
+ example: 123456789012
+ peering_options:
+ description: Additional peering configuration.
+ returned: when connection is in the accepted state.
+ type: dict
+ contains:
+ allow_dns_resolution_from_remote_vpc:
+ description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
+ returned: success
+ type: bool
+ allow_egress_from_local_classic_link_to_remote_vpc:
+ description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ allow_egress_from_local_vpc_to_remote_classic_link:
+ description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ region:
+ description: The AWS region that the VPC is in.
+ returned: success
+ type: str
+ example: us-east-1
+ vpc_id:
+ description: The ID of the VPC.
+ returned: success
+ type: str
+ example: vpc-0123456789abcdef0
+ requester_vpc_info:
+ description: Information about the VPC which requested the connection.
+ returned: success
+ type: complex
+ contains:
+ cidr_block:
+ description: The primary CIDR for the VPC.
+ returned: when connection is not in the deleted state.
+ type: str
+ example: '10.10.10.0/23'
+ cidr_block_set:
+ description: A list of all CIDRs for the VPC.
+ returned: when connection is not in the deleted state.
+ type: complex
+ contains:
+ cidr_block:
+ description: A CIDR block used by the VPC.
+ returned: success
+ type: str
+ example: '10.10.10.0/23'
+ owner_id:
+ description: The AWS account that owns the VPC.
+ returned: success
+ type: str
+ example: 123456789012
+ peering_options:
+ description: Additional peering configuration.
+ returned: when connection is not in the deleted state.
+ type: dict
+ contains:
+ allow_dns_resolution_from_remote_vpc:
+ description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
+ returned: success
+ type: bool
+ allow_egress_from_local_classic_link_to_remote_vpc:
+ description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ allow_egress_from_local_vpc_to_remote_classic_link:
+ description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.
+ returned: success
+ type: bool
+ region:
+ description: The AWS region that the VPC is in.
+ returned: success
+ type: str
+ example: us-east-1
+ vpc_id:
+ description: The ID of the VPC.
+ returned: success
+ type: str
+ example: vpc-0123456789abcdef0
+ status:
+ description: Details of the current status of the connection.
+ returned: success
+ type: complex
+ contains:
+ code:
+ description: A short code describing the status of the connection.
+ returned: success
+ type: str
+ example: active
+ message:
+ description: Additional information about the status of the connection.
+ returned: success
+ type: str
+ example: Pending Acceptance by 123456789012
+ tags:
+ description: Tags applied to the connection.
+ returned: success
+ type: dict
+ vpc_peering_connection_id:
+ description: The ID of the VPC peering connection.
+ returned: success
+ type: str
+ example: "pcx-0123456789abcdef0"
+
+result:
+ description: The result of the describe; identical to I(vpc_peering_connections).
+ returned: success
+ type: list
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_vpc_peers(client, module):
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('peer_connection_ids'):
+ params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
+ try:
+ result = client.describe_vpc_peering_connections(aws_retry=True, **params)
+ result = normalize_boto3_result(result)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe peering connections")
+
+ return result['VpcPeeringConnections']
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=dict(), type='dict'),
+ peer_connection_ids=dict(default=None, type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ try:
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Turn the boto3 results into ansible-friendly snake_cased names
+ results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
+
+ # Turn the boto3 tag lists into ansible-friendly tag dictionaries
+ for peer in results:
+ peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', []))
+
+ module.exit_json(result=results, vpc_peering_connections=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
new file mode 100644
index 000000000..8332e1006
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
@@ -0,0 +1,529 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_vgw
+short_description: Create and delete AWS VPN Virtual Gateways
+version_added: 1.0.0
+description:
+ - Creates AWS VPN Virtual Gateways.
+ - Deletes AWS VPN Virtual Gateways.
+ - Attaches Virtual Gateways to VPCs.
+ - Detaches Virtual Gateways from VPCs.
+options:
+ state:
+ description:
+ - C(present) to ensure resource is created.
+ - C(absent) to remove resource.
+ default: present
+ choices: [ "present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the VGW to be created or deleted.
+ type: str
+ type:
+ description:
+ - Type of the virtual gateway to be created.
+ choices: [ "ipsec.1" ]
+ default: "ipsec.1"
+ type: str
+ vpn_gateway_id:
+ description:
+ - VPN gateway ID of an existing virtual gateway.
+ type: str
+ vpc_id:
+ description:
+ - The ID of a VPC to attach or detach to the VGW.
+ type: str
+ asn:
+ description:
+ - The BGP ASN on the Amazon side.
+ type: int
+ wait_timeout:
+ description:
+ - Number of seconds to wait for status during VPC attach and detach.
+ default: 320
+ type: int
+notes:
+ - Support for I(purge_tags) was added in release 4.0.0.
+author:
+ - Nick Aslanidis (@naslanidis)
+extends_documentation_fragment:
+ - amazon.aws.ec2
+ - amazon.aws.aws
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+- name: Create a new VGW attached to a specific VPC
+ community.aws.ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ vpc_id: vpc-12345678
+ name: personal-testing
+ type: ipsec.1
+ register: created_vgw
+
+- name: Create a new unattached VGW
+ community.aws.ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ tags:
+ environment: production
+ owner: ABC
+ register: created_vgw
+
+- name: Remove a new VGW using the name
+ community.aws.ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ register: deleted_vgw
+
+- name: Remove a new VGW using the vpn_gateway_id
+ community.aws.ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ vpn_gateway_id: vgw-3a9aa123
+ register: deleted_vgw
+'''
+
+RETURN = '''
+vgw:
+ description: A description of the VGW.
+ returned: success
+ type: dict
+ contains:
+ id:
+ description: The ID of the VGW.
+ type: str
+ returned: success
+ example: "vgw-0123456789abcdef0"
+ state:
+ description: The state of the VGW.
+ type: str
+ returned: success
+ example: "available"
+ tags:
+ description: A dictionary representing the tags attached to the VGW.
+ type: dict
+ returned: success
+ example: { "Name": "ansible-test-ec2-vpc-vgw" }
+ type:
+ description: The type of VPN connection the virtual private gateway supports.
+ type: str
+ returned: success
+ example: "ipsec.1"
+ vpc_id:
+ description: The ID of the VPC to which the VGW is attached.
+ type: str
+ returned: success
+ example: vpc-123456789abcdef01
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes'
+# so we need to look at the message to tell the difference.
+class VGWRetry(AWSRetry):
+ @staticmethod
+ def status_code_from_exception(error):
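+ # Return a (code, message) tuple so found() below can match on either element.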
+ return (error.response['Error']['Code'], error.response['Error']['Message'],)
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ retry_on = ['The maximum number of mutating objects has been reached.']
+
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+ if not isinstance(response_code, tuple):
+ response_code = (response_code,)
+
+ for code in response_code:
+ if super().found(code, catch_extra_error_codes):
+ return True
+
+ return False
+
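+# Illustrative use of VGWRetry above (identifiers are made up):
+#   VGWRetry.jittered_backoff(retries=5)(client.attach_vpn_gateway)(
+#       VpnGatewayId='vgw-0123456789abcdef0', VpcId='vpc-12345678')
+# attach_vgw() below uses this pattern with catch_extra_error_codes.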
+
+def get_vgw_info(vgws):
+ if not isinstance(vgws, list):
+ return
+
+ for vgw in vgws:
+ vgw_info = {
+ 'id': vgw['VpnGatewayId'],
+ 'type': vgw['Type'],
+ 'state': vgw['State'],
+ 'vpc_id': None,
+ 'tags': dict()
+ }
+
+ if vgw['Tags']:
+ vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags'])
+
+ if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
+ vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
+
+ return vgw_info
+
+
+def wait_for_status(client, module, vpn_gateway_id, status):
+ polling_increment_secs = 15
+ max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+ status_achieved = False
+
+ for x in range(0, max_retries):
+ try:
+ response = find_vgw(client, module, vpn_gateway_id)
+ if response[0]['VpcAttachments'][0]['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failure while waiting for status update')
+
+ result = response
+ return status_achieved, result
+
+
+def attach_vgw(client, module, vpn_gateway_id):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ try:
+ # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
+ # as available several seconds before actually permitting a new attachment.
+ # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185
+ response = VGWRetry.jittered_backoff(retries=5,
+ catch_extra_error_codes=['InvalidParameterValue']
+ )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id,
+ VpcId=params['VpcId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to attach VPC')
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
+ params = dict()
+ params['VpcId'] = module.params.get('vpc_id')
+
+ try:
+ if vpc_id:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True)
+ else:
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, 'Failed to detach gateway')
+
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
+
+ result = response
+ return result
+
+
+def create_vgw(client, module):
+ params = dict()
+ params['Type'] = module.params.get('type')
+ tags = module.params.get('tags') or {}
+ tags['Name'] = module.params.get('name')
+ params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway'])
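+ # boto3_tag_specifications renders the tags into the shape expected by
+ # create_vpn_gateway, e.g.
+ # [{'ResourceType': 'vpn-gateway', 'Tags': [{'Key': 'Name', 'Value': '...'}]}]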
+ if module.params.get('asn'):
+ params['AmazonSideAsn'] = module.params.get('asn')
+
+ try:
+ response = client.create_vpn_gateway(aws_retry=True, **params)
+ get_waiter(
+ client, 'vpn_gateway_exists'
+ ).wait(
+ VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']]
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']))
+ except is_boto3_error_code('VpnGatewayLimitExceeded') as e:
+ module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to create gateway')
+
+ result = response
+ return result
+
+
+def delete_vgw(client, module, vpn_gateway_id):
+
+ try:
+ response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to delete gateway')
+
+ # return the deleted VpnGatewayId as this is not included in the above response
+ result = vpn_gateway_id
+ return result
+
+
+def find_vpc(client, module):
+ params = dict()
+ params['vpc_id'] = module.params.get('vpc_id')
+
+ if params['vpc_id']:
+ try:
+ response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe VPC')
+
+ result = response
+ return result
+
+
+def find_vgw(client, module, vpn_gateway_id=None):
+ params = dict()
+ if vpn_gateway_id:
+ params['VpnGatewayIds'] = vpn_gateway_id
+ else:
+ params['Filters'] = [
+ {'Name': 'type', 'Values': [module.params.get('type')]},
+ {'Name': 'tag:Name', 'Values': [module.params.get('name')]},
+ ]
+ if module.params.get('state') == 'present':
+ params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']})
+ try:
+ response = client.describe_vpn_gateways(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe gateway using filters')
+
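+ # Sorting by ID keeps the result deterministic when several gateways match.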
+ return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId'])
+
+
+def ensure_vgw_present(client, module):
+
+ # If an existing vgw name and type matches our args, then a match is considered to have been
+ # found and we will not create another vgw.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # check that the vpc_id exists; if not, find_vpc() will fail the module
+ if params['VpcId']:
+ vpc = find_vpc(client, module)
+
+ # check if a gateway matching our module args already exists
+ existing_vgw = find_vgw(client, module)
+
+ if existing_vgw != []:
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ desired_tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ if desired_tags is None:
+ desired_tags = dict()
+ purge_tags = False
+ tags = dict(Name=module.params.get('name'))
+ tags.update(desired_tags)
+ changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway',
+ tags=tags, purge_tags=purge_tags)
+
+ # if a vpc_id was provided, check if it exists and if it's attached
+ if params['VpcId']:
+
+ current_vpc_attachments = existing_vgw[0]['VpcAttachments']
+
+ if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
+ if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached':
+ # detach the existing vpc from the virtual gateway
+ vpc_to_detach = current_vpc_attachments[0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id])
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+ else:
+ # attach the vgw to the supplied vpc
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ # If params['VpcId'] is not provided, check whether the VGW is attached to a VPC; if so, detach it.
+ else:
+ existing_vgw = find_vgw(client, module, [vpn_gateway_id])
+
+ if existing_vgw[0]['VpcAttachments'] != []:
+ if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ # detach the vpc from the vgw
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ else:
+ # create a new vgw
+ new_vgw = create_vgw(client, module)
+ changed = True
+ vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
+
+ # if a vpc-id was supplied, attempt to attach it to the vgw
+ if params['VpcId']:
+ attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ # return current state of the vgw
+ vgw = find_vgw(client, module, [vpn_gateway_id])
+ result = get_vgw_info(vgw)
+ return changed, result
+
+
+def ensure_vgw_absent(client, module):
+
+ # If an existing vgw name and type matches our args, then a match is considered to have been
+ # found and we will take steps to delete it.
+
+ changed = False
+ params = dict()
+ result = dict()
+ params['Name'] = module.params.get('name')
+ params['VpcId'] = module.params.get('vpc_id')
+ params['Type'] = module.params.get('type')
+ params['Tags'] = module.params.get('tags')
+ params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+
+ # check if a gateway matching our module args already exists
+ if params['VpnGatewayIds']:
+ existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
+ if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
+ existing_vgw = existing_vgw_with_id
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ # no VPCs are attached, so attempt to delete the VGW
+ deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = "Nothing to do"
+
+ else:
+ # Check that a name and type argument has been supplied if no vgw-id
+ if not module.params.get('name') or not module.params.get('type'):
+ module.fail_json(msg='A name and type are required when no vgw-id is supplied and state is \'absent\'')
+
+ existing_vgw = find_vgw(client, module)
+ if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
+ vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+ if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if params['VpcId']:
+ if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
+ module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+
+ else:
+ # detach the vpc from the vgw
+ detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
+
+ # now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ # attempt to detach any attached vpcs
+ vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+ changed = True
+
+ # now that the vpc has been detached, delete the vgw
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+
+ else:
+ # no VPCs are attached, so attempt to delete the VGW
+ deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
+ changed = True
+
+ else:
+ changed = False
+ deleted_vgw = None
+
+ result = deleted_vgw
+ return changed, result
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(),
+ vpn_gateway_id=dict(),
+ vpc_id=dict(),
+ asn=dict(type='int'),
+ wait_timeout=dict(type='int', default=320),
+ type=dict(default='ipsec.1', choices=['ipsec.1']),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['name']]])
+
+ state = module.params.get('state').lower()
+
+ client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10))
+
+ if state == 'present':
+ (changed, results) = ensure_vgw_present(client, module)
+ else:
+ (changed, results) = ensure_vgw_absent(client, module)
+ module.exit_json(changed=changed, vgw=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
new file mode 100644
index 000000000..fcb520cf0
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vgw_info
+version_added: 1.0.0
+short_description: Gather information about virtual gateways in AWS
+description:
+ - Gather information about virtual gateways (VGWs) in AWS.
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters.
+ type: dict
+ default: {}
+ vpn_gateway_ids:
+ description:
+ - Get details of a specific Virtual Gateway ID.
+ type: list
+ elements: str
+author:
+ - "Nick Aslanidis (@naslanidis)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all virtual gateways for an account or profile
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ register: vgw_info
+
+- name: Gather information about a filtered list of Virtual Gateways
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "main-virt-gateway"
+ register: vgw_info
+
+- name: Gather information about a specific virtual gateway by VpnGatewayIds
+ community.aws.ec2_vpc_vgw_info:
+ region: ap-southeast-2
+ profile: production
+ vpn_gateway_ids: vgw-c432f6a7
+ register: vgw_info
+'''
+
+RETURN = r'''
+virtual_gateways:
+ description: The virtual gateways for the account.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ vpn_gateway_id:
+ description: The ID of the VGW.
+ type: str
+ returned: success
+ example: "vgw-0123456789abcdef0"
+ state:
+ description: The current state of the VGW.
+ type: str
+ returned: success
+ example: "available"
+ type:
+ description: The type of VPN connection the VGW supports.
+ type: str
+ returned: success
+ example: "ipsec.1"
+ vpc_attachments:
+ description: A description of the attachment of VPCs to the VGW.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ state:
+ description: The current state of the attachment.
+ type: str
+ returned: success
+ example: available
+ vpc_id:
+ description: The ID of the VPC.
+ type: str
+ returned: success
+ example: vpc-12345678901234567
+ tags:
+ description:
+ - A list of dictionaries representing the tags attached to the VGW.
+ - Represents the same details as I(resource_tags).
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ key:
+ description: The key of the tag.
+ type: str
+ returned: success
+ example: MyKey
+ value:
+ description: The value of the tag.
+ type: str
+ returned: success
+ example: MyValue
+ resource_tags:
+ description:
+ - A dictionary representing the tags attached to the VGW.
+ - Represents the same details as I(tags).
+ type: dict
+ returned: success
+ example: {"MyKey": "MyValue"}
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_virtual_gateway_info(virtual_gateway):
+ tags = virtual_gateway.get('Tags', [])
+ resource_tags = boto3_tag_list_to_ansible_dict(tags)
+ virtual_gateway_info = dict(
+ VpnGatewayId=virtual_gateway['VpnGatewayId'],
+ State=virtual_gateway['State'],
+ Type=virtual_gateway['Type'],
+ VpcAttachments=virtual_gateway['VpcAttachments'],
+ Tags=tags,
+ ResourceTags=resource_tags,
+ )
+ return virtual_gateway_info
+
+
+def list_virtual_gateways(client, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+
+ if module.params.get("vpn_gateway_ids"):
+ params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids")
+
+ try:
+ all_virtual_gateways = client.describe_vpn_gateways(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to list gateways")
+
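+ # ignore_list stops camel_dict_to_snake_dict from rewriting the keys nested
+ # under ResourceTags, so user-supplied tag keys keep their original case.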
+ return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags'])
+ for vgw in all_virtual_gateways['VpnGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ vpn_gateway_ids=dict(type='list', default=None, elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Gather and return details of the matching virtual gateways
+ results = list_virtual_gateways(connection, module)
+
+ module.exit_json(virtual_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
new file mode 100644
index 000000000..77a994aaa
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
@@ -0,0 +1,803 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vpn
+version_added: 1.0.0
+short_description: Create, modify, and delete EC2 VPN connections
+description:
+ - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters
+ option or specifying the VPN connection identifier.
+extends_documentation_fragment:
+ - amazon.aws.ec2
+ - amazon.aws.aws
+ - amazon.aws.boto3
+ - amazon.aws.tags
+author:
+ - "Sloane Hertel (@s-hertel)"
+options:
+ state:
+ description:
+ - The desired state of the VPN connection.
+ choices: ['present', 'absent']
+ default: present
+ required: false
+ type: str
+ customer_gateway_id:
+ description:
+ - The ID of the customer gateway.
+ type: str
+ connection_type:
+ description:
+ - The type of VPN connection.
+ - At this time only C(ipsec.1) is supported.
+ default: ipsec.1
+ type: str
+ vpn_gateway_id:
+ description:
+ - The ID of the virtual private gateway.
+ type: str
+ vpn_connection_id:
+ description:
+ - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match.
+ type: str
+ static_only:
+ description:
+ - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.
+ default: False
+ type: bool
+ required: false
+ tunnel_options:
+ description:
+ - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr)
+ and/or I(PreSharedKey) keys with appropriate string values. AWS defaults will apply in absence of either of
+ the aforementioned keys.
+ required: false
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ TunnelInsideCidr:
+ type: str
+ description: The range of inside IP addresses for the tunnel.
+ PreSharedKey:
+ type: str
+ description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.
+ filters:
+ description:
+ - An alternative to using I(vpn_connection_id). If multiple matches are found, vpn_connection_id is required.
+ If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN
+ that correlates. e.g. if the filter I(cidr) is C(['194.168.2.0/24', '192.168.2.0/24']) and the VPN route only has the
+ destination cidr block of C(192.168.2.0/24) it will be found with this filter (assuming there are not multiple
+ destination cidr block of C(192.168.2.0/24), it will be found with this filter (assuming there are not multiple
+ VPNs that are matched). As another example, if the filter I(vpn) is equal to C(['vpn-ccf7e7ad', 'vpn-cb0ae2a2']) and one
+ of the VPNs has the state deleted (exists but is unmodifiable) and the other exists and is not deleted,
+ suboptions:
+ cgw-config:
+ description:
+ - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings.
+ static-routes-only:
+ description:
+ - The type of routing; C(true) or C(false).
+ cidr:
+ description:
+ - The destination cidr of the VPN's route as a string or a list of those strings.
+ bgp:
+ description:
+ - The BGP ASN number associated with a BGP device. Only works if the connection is attached.
+ This filtering option is currently not working.
+ vpn:
+ description:
+ - The VPN connection id as a string or a list of those strings.
+ vgw:
+ description:
+ - The virtual private gateway as a string or a list of those strings.
+ tag-keys:
+ description:
+ - The key of a tag as a string or a list of those strings.
+ tag-values:
+ description:
+ - The value of a tag as a string or a list of those strings.
+ tags:
+ description:
+ - A dict of key value pairs.
+ cgw:
+ description:
+ - The customer gateway id as a string or a list of those strings.
+ type: dict
+ default: {}
+ routes:
+ description:
+ - Routes to add to the connection.
+ type: list
+ elements: str
+ default: []
+ purge_routes:
+ description:
+ - Whether or not to delete VPN connections routes that are not specified in the task.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long, in seconds, before wait gives up.
+ default: 600
+ type: int
+ required: false
+ delay:
+ description:
+ - The time, in seconds, to wait before checking operation again.
+ required: false
+ type: int
+ default: 15
+'''
+
+EXAMPLES = r"""
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: create a VPN connection
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_gateway_id: vgw-XXXXXXXX
+ customer_gateway_id: cgw-XXXXXXXX
+
+- name: modify VPN connection tags
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_connection_id: vpn-XXXXXXXX
+ tags:
+ Name: ansible-tag-1
+ Other: ansible-tag-2
+
+- name: delete a connection
+ community.aws.ec2_vpc_vpn:
+ vpn_connection_id: vpn-XXXXXXXX
+ state: absent
+
+- name: modify VPN tags (identifying VPN by filters)
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ cidr: 194.168.1.0/24
+ tag-keys:
+ - Ansible
+ - Other
+ tags:
+ New: Tag
+ purge_tags: true
+ static_only: true
+
+- name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ vpn: vpn-XXXXXXXX
+ static_only: true
+ tunnel_options:
+ -
+ TunnelInsideCidr: '169.254.100.1/30'
+ -
+ TunnelInsideCidr: '169.254.100.5/30'
+
+- name: add routes and remove any preexisting ones
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ vpn: vpn-XXXXXXXX
+ routes:
+ - 195.168.2.0/24
+ - 196.168.2.0/24
+ purge_routes: true
+
+- name: remove all routes
+ community.aws.ec2_vpc_vpn:
+ state: present
+ vpn_connection_id: vpn-XXXXXXXX
+ routes: []
+ purge_routes: true
+
+- name: delete a VPN identified by filters
+ community.aws.ec2_vpc_vpn:
+ state: absent
+ filters:
+ tags:
+ Ansible: Tag
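+
+# An illustrative example (hypothetical CIDRs): list-valued filters match when any
+# one item matches, as described under the filters option above.
+- name: tag whichever VPN matches one of two route CIDRs
+ community.aws.ec2_vpc_vpn:
+ state: present
+ filters:
+ cidr: ['194.168.2.0/24', '192.168.2.0/24']
+ tags:
+ Matched: route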
+"""
+
+RETURN = r"""
+changed:
+ description: If the VPN connection has changed.
+ type: bool
+ returned: always
+ sample:
+ changed: true
+customer_gateway_configuration:
+ description: The configuration of the VPN connection.
+ returned: I(state=present)
+ type: str
+customer_gateway_id:
+ description: The customer gateway connected via the connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ customer_gateway_id: cgw-1220c87b
+vpn_gateway_id:
+ description: The virtual private gateway connected via the connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpn_gateway_id: vgw-cb0ae2a2
+options:
+ description: The VPN connection options (currently only containing static_routes_only).
+ type: complex
+ returned: I(state=present)
+ contains:
+ static_routes_only:
+ description: If the VPN connection only allows static routes.
+ returned: I(state=present)
+ type: str
+ sample:
+ static_routes_only: true
+routes:
+ description: The routes of the VPN connection.
+ type: list
+ returned: I(state=present)
+ sample:
+ routes: [{
+ 'destination_cidr_block': '192.168.1.0/24',
+ 'state': 'available'
+ }]
+state:
+ description: The status of the VPN connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ state: available
+tags:
+ description: The tags associated with the connection.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ name: ansible-test
+ other: tag
+type:
+ description: The type of VPN connection (currently only ipsec.1 is available).
+ type: str
+ returned: I(state=present)
+ sample:
+ type: "ipsec.1"
+vgw_telemetry:
+ type: list
+ returned: I(state=present)
+ description: The telemetry for the VPN tunnel.
+ sample:
+ vgw_telemetry: [{
+ 'outside_ip_address': 'string',
+ 'status': 'up',
+ 'last_status_change': 'datetime(2015, 1, 1)',
+ 'status_message': 'string',
+ 'accepted_route_count': 123
+ }]
+vpn_connection_id:
+ description: The identifier for the VPN connection.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpn_connection_id: vpn-781e0e19
+"""
+
+from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+class VPNConnectionException(Exception):
+ def __init__(self, msg, exception=None):
+ super(VPNConnectionException, self).__init__(msg)
+ self.msg = msg
+ self.exception = exception
+
+
+# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes';
+# we need to look at the message to tell the difference.
+class VPNRetry(AWSRetry):
+ @staticmethod
+ def status_code_from_exception(error):
+ return (error.response['Error']['Code'], error.response['Error']['Message'],)
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ retry_on = ['The maximum number of mutating objects has been reached.']
+
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+ if not isinstance(response_code, tuple):
+ response_code = (response_code,)
+
+ for code in response_code:
+ if code in retry_on or super().found(code, catch_extra_error_codes):
+ return True
+
+ return False
+
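+# Usage sketch (mirrors main() below): the retry decorator is attached when the
+# boto3 client is created, e.g.
+#   connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10))
+# and any call made with aws_retry=True is then retried on the codes/messages above.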
+
+def find_connection(connection, module_params, vpn_connection_id=None):
+ ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None,
+ or raise an error if there were multiple viable connections. '''
+
+ filters = module_params.get('filters')
+
+ # vpn_connection_id may be provided via module option; takes precedence over any filter values
+ if not vpn_connection_id and module_params.get('vpn_connection_id'):
+ vpn_connection_id = module_params.get('vpn_connection_id')
+
+ if not isinstance(vpn_connection_id, list) and vpn_connection_id:
+ vpn_connection_id = [to_text(vpn_connection_id)]
+ elif isinstance(vpn_connection_id, list):
+ vpn_connection_id = [to_text(conn) for conn in vpn_connection_id]
+
+ formatted_filter = []
+ # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier
+ if not vpn_connection_id:
+ formatted_filter = create_filter(module_params, provided_filters=filters)
+
+ # see if there is a unique matching connection
+ try:
+ if vpn_connection_id:
+ existing_conn = connection.describe_vpn_connections(aws_retry=True,
+ VpnConnectionIds=vpn_connection_id,
+ Filters=formatted_filter)
+ else:
+ existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed while describing VPN connection.",
+ exception=e)
+
+ return find_connection_response(connections=existing_conn)
+
+
+def add_routes(connection, vpn_connection_id, routes_to_add):
+ for route in routes_to_add:
+ try:
+ connection.create_vpn_connection_route(aws_retry=True,
+ VpnConnectionId=vpn_connection_id,
+ DestinationCidrBlock=route)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id),
+ exception=e)
+
+
+def remove_routes(connection, vpn_connection_id, routes_to_remove):
+ for route in routes_to_remove:
+ try:
+ connection.delete_vpn_connection_route(aws_retry=True,
+ VpnConnectionId=vpn_connection_id,
+ DestinationCidrBlock=route)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id),
+ exception=e)
+
+
+def create_filter(module_params, provided_filters):
+ """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """
+ boto3ify_filter = {'cgw-config': 'customer-gateway-configuration',
+ 'static-routes-only': 'option.static-routes-only',
+ 'cidr': 'route.destination-cidr-block',
+ 'bgp': 'bgp-asn',
+ 'vpn': 'vpn-connection-id',
+ 'vgw': 'vpn-gateway-id',
+ 'tag-keys': 'tag-key',
+ 'tag-values': 'tag-value',
+ 'tags': 'tag',
+ 'cgw': 'customer-gateway-id'}
+
+ # unmodifiable options and their filter name counterpart
+ param_to_filter = {"customer_gateway_id": "customer-gateway-id",
+ "vpn_gateway_id": "vpn-gateway-id",
+ "vpn_connection_id": "vpn-connection-id"}
+
+ flat_filter_dict = {}
+ formatted_filter = []
+
+ for raw_param in dict(provided_filters):
+
+ # fix filter names to be recognized by boto3
+ if raw_param in boto3ify_filter:
+ param = boto3ify_filter[raw_param]
+ provided_filters[param] = provided_filters.pop(raw_param)
+ elif raw_param in boto3ify_filter.values():
+ param = raw_param
+ else:
+ raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param))
+
+ # reformat filters with special formats
+ if param == 'tag':
+ for key in provided_filters[param]:
+ formatted_key = 'tag:' + key
+ if isinstance(provided_filters[param][key], list):
+ flat_filter_dict[formatted_key] = [str(v) for v in provided_filters[param][key]]
+ else:
+ flat_filter_dict[formatted_key] = [str(provided_filters[param][key])]
+ elif param == 'option.static-routes-only':
+ flat_filter_dict[param] = [str(provided_filters[param]).lower()]
+ else:
+ if isinstance(provided_filters[param], list):
+ flat_filter_dict[param] = provided_filters[param]
+ else:
+ flat_filter_dict[param] = [str(provided_filters[param])]
+
+ # if customer_gateway, vpn_gateway, or vpn_connection was specified in the task but not the filter, add it
+ for param in param_to_filter:
+ if param_to_filter[param] not in flat_filter_dict and module_params.get(param):
+ flat_filter_dict[param_to_filter[param]] = [module_params.get(param)]
+
+ # change the flat dict into something boto3 will understand
+ formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()]
+
+ return formatted_filter
+
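+# Worked illustration (hypothetical values): create_filter({}, {'cidr': '192.168.2.0/24', 'tags': {'Env': 'prod'}})
+# returns [{'Name': 'route.destination-cidr-block', 'Values': ['192.168.2.0/24']},
+#          {'Name': 'tag:Env', 'Values': ['prod']}]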
+
+def find_connection_response(connections=None):
+ """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
+ returns None if the connection does not exist, raise an error if multiple matches are found. """
+
+ # Found no connections
+ if not connections or 'VpnConnections' not in connections:
+ return None
+
+ # Too many results
+ elif connections and len(connections['VpnConnections']) > 1:
+ viable = []
+ for each in connections['VpnConnections']:
+ # deleted connections are not modifiable
+ if each['State'] not in ("deleted", "deleting"):
+ viable.append(each)
+ if len(viable) == 1:
+ # Found one viable result; return unique match
+ return viable[0]
+ elif len(viable) == 0:
+ # All matches were already deleted; return None so a new connection can be created
+ return None
+ else:
+ raise VPNConnectionException(msg="More than one matching VPN connection was found. "
+ "To modify or delete a VPN please specify vpn_connection_id or add filters.")
+
+ # Found unique match
+ elif connections and len(connections['VpnConnections']) == 1:
+ # deleted connections are not modifiable
+ if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"):
+ return connections['VpnConnections'][0]
+
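+# For example, if two connections match but one is in state 'deleted', the single
+# non-deleted one is returned; two non-deleted matches raise VPNConnectionException.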
+
+def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None):
+ """ Creates a VPN connection """
+
+ options = {'StaticRoutesOnly': static_only}
+ if tunnel_options and len(tunnel_options) <= 2:
+ t_opt = []
+ for m in tunnel_options:
+ # See Boto3 docs regarding 'create_vpn_connection'
+ # tunnel options for allowed 'TunnelOptions' keys.
+ if not isinstance(m, dict):
+ raise TypeError("non-dict list member")
+ t_opt.append(m)
+ if t_opt:
+ options['TunnelOptions'] = t_opt
+
+ if not (customer_gateway_id and vpn_gateway_id):
+ raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide "
+ "both vpn_gateway_id and customer_gateway_id.")
+ try:
+ vpn = connection.create_vpn_connection(Type=connection_type,
+ CustomerGatewayId=customer_gateway_id,
+ VpnGatewayId=vpn_gateway_id,
+ Options=options)
+ connection.get_waiter('vpn_connection_available').wait(
+ VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+ except WaiterError as e:
+ raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']),
+ exception=e)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to create VPN connection",
+ exception=e)
+
+ return vpn['VpnConnection']
+
+
+def delete_connection(connection, vpn_connection_id, delay, max_attempts):
+ """ Deletes a VPN connection """
+ try:
+ connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id)
+ connection.get_waiter('vpn_connection_deleted').wait(
+ VpnConnectionIds=[vpn_connection_id],
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ )
+ except WaiterError as e:
+ raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id),
+ exception=e)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id),
+ exception=e)
+
+
+def add_tags(connection, vpn_connection_id, add):
+ try:
+ connection.create_tags(aws_retry=True,
+ Resources=[vpn_connection_id],
+ Tags=add)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add),
+ exception=e)
+
+
+def remove_tags(connection, vpn_connection_id, remove):
+ # format tags since they are a list in the format ['tag1', 'tag2', 'tag3']
+ key_dict_list = [{'Key': tag} for tag in remove]
+ try:
+ connection.delete_tags(aws_retry=True,
+ Resources=[vpn_connection_id],
+ Tags=key_dict_list)
+ except (BotoCoreError, ClientError) as e:
+ raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove),
+ exception=e)
+
+
+def check_for_update(connection, module_params, vpn_connection_id):
+ """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """
+ tags = module_params.get('tags')
+ routes = module_params.get('routes')
+ purge_tags = module_params.get('purge_tags')
+ purge_routes = module_params.get('purge_routes')
+
+ vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id)
+ current_attrs = camel_dict_to_snake_dict(vpn_connection)
+
+ # Initialize changes dict
+ changes = {'tags_to_add': [],
+ 'tags_to_remove': [],
+ 'routes_to_add': [],
+ 'routes_to_remove': []}
+
+ # Get changes to tags
+ current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value')
+ if tags is None:
+ changes['tags_to_remove'] = []
+ changes['tags_to_add'] = []
+ else:
+ tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags)
+ changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add)
+ # Get changes to routes
+ if 'Routes' in vpn_connection:
+ current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']]
+ if purge_routes:
+ changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes]
+ changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes]
+
+ # Check if nonmodifiable attributes are attempted to be modified
+ for attribute in current_attrs:
+ if attribute in ("tags", "routes", "state"):
+ continue
+ elif attribute == 'options':
+ will_be = module_params.get('static_only', None)
+ is_now = bool(current_attrs[attribute]['static_routes_only'])
+ attribute = 'static_only'
+ elif attribute == 'type':
+ will_be = module_params.get("connection_type", None)
+ is_now = current_attrs[attribute]
+ else:
+ is_now = current_attrs[attribute]
+ will_be = module_params.get(attribute, None)
+
+ if will_be is not None and to_text(will_be) != to_text(is_now):
+ raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN "
+ "connection attributes are tags and routes. The value you tried to change it to "
+ "is {2}.".format(attribute, is_now, will_be))
+
+ return changes
+
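+# For example, changing static_only on an existing connection raises above, since
+# AWS offers no modify call for it; only tags and routes are changed in place.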
+
+def make_changes(connection, vpn_connection_id, changes):
+ """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove',
+ the values of which are lists (generated by check_for_update()).
+ """
+ changed = False
+
+ if changes['tags_to_add']:
+ changed = True
+ add_tags(connection, vpn_connection_id, changes['tags_to_add'])
+
+ if changes['tags_to_remove']:
+ changed = True
+ remove_tags(connection, vpn_connection_id, changes['tags_to_remove'])
+
+ if changes['routes_to_add']:
+ changed = True
+ add_routes(connection, vpn_connection_id, changes['routes_to_add'])
+
+ if changes['routes_to_remove']:
+ changed = True
+ remove_routes(connection, vpn_connection_id, changes['routes_to_remove'])
+
+ return changed
+
+
+def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None):
+ """ Returns the changes that would be made to a VPN Connection """
+ state = module_params.get('state')
+ if state == 'absent':
+ if vpn_connection_id:
+ return True, {}
+ else:
+ return False, {}
+
+ changed = False
+ results = {'customer_gateway_configuration': '',
+ 'customer_gateway_id': module_params.get('customer_gateway_id'),
+ 'vpn_gateway_id': module_params.get('vpn_gateway_id'),
+ 'options': {'static_routes_only': module_params.get('static_only')},
+ 'routes': [module_params.get('routes')]}
+
+ # get combined current tags and tags to set
+ present_tags = module_params.get('tags')
+ if present_tags is None:
+ pass
+ elif current_state and 'Tags' in current_state:
+ current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags'])
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags'))
+ changed |= bool(tags_to_remove) or bool(tags_to_add)
+ if module_params.get('purge_tags'):
+ current_tags = {}
+ current_tags.update(present_tags)
+ results['tags'] = current_tags
+ elif module_params.get('tags'):
+ changed = True
+ if present_tags:
+ results['tags'] = present_tags
+
+ # get combined current routes and routes to add
+ present_routes = module_params.get('routes')
+ if current_state and 'Routes' in current_state:
+ current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']]
+ if module_params.get('purge_routes'):
+ if set(current_routes) != set(present_routes):
+ changed = True
+ elif set(present_routes) != set(current_routes):
+ if not set(present_routes) < set(current_routes):
+ changed = True
+ present_routes.extend([route for route in current_routes if route not in present_routes])
+ elif module_params.get('routes'):
+ changed = True
+ results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes]
+
+ # return the vpn_connection_id if it's known
+ if vpn_connection_id:
+ results['vpn_connection_id'] = vpn_connection_id
+ else:
+ changed = True
+ results['vpn_connection_id'] = 'vpn-XXXXXXXX'
+
+ return changed, results
+
+
+def ensure_present(connection, module_params, check_mode=False):
+ """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """
+ vpn_connection = find_connection(connection, module_params)
+ changed = False
+ delay = module_params.get('delay')
+ max_attempts = module_params.get('wait_timeout') // delay
+
+ # No match but vpn_connection_id was specified.
+ if not vpn_connection and module_params.get('vpn_connection_id'):
+ raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?")
+
+ # Unique match was found. Check if attributes provided differ.
+ elif vpn_connection:
+ vpn_connection_id = vpn_connection['VpnConnectionId']
+ # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove
+ changes = check_for_update(connection, module_params, vpn_connection_id)
+ if check_mode:
+ return get_check_mode_results(connection, module_params, vpn_connection_id, current_state=vpn_connection)
+ changed = make_changes(connection, vpn_connection_id, changes)
+
+ # No match was found. Create and tag a connection and add routes.
+ else:
+ changed = True
+ if check_mode:
+ return get_check_mode_results(connection, module_params)
+ vpn_connection = create_connection(connection,
+ customer_gateway_id=module_params.get('customer_gateway_id'),
+ static_only=module_params.get('static_only'),
+ vpn_gateway_id=module_params.get('vpn_gateway_id'),
+ connection_type=module_params.get('connection_type'),
+ tunnel_options=module_params.get('tunnel_options'),
+ max_attempts=max_attempts,
+ delay=delay)
+ changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId'])
+ make_changes(connection, vpn_connection['VpnConnectionId'], changes)
+
+ # get latest version if a change has been made and make tags output nice before returning it
+ if vpn_connection:
+ vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId'])
+ if 'Tags' in vpn_connection:
+ vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags'])
+
+ return changed, vpn_connection
+
+
+def ensure_absent(connection, module_params, check_mode=False):
+ """ Deletes a VPN connection if it exists. """
+ vpn_connection = find_connection(connection, module_params)
+
+ if check_mode:
+ return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None)
+
+ delay = module_params.get('delay')
+ max_attempts = module_params.get('wait_timeout') // delay
+
+ if vpn_connection:
+ delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts)
+ changed = True
+ else:
+ changed = False
+
+ return changed, {}
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ filters=dict(type='dict', default={}),
+ vpn_gateway_id=dict(type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ connection_type=dict(default='ipsec.1', type='str'),
+ tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'),
+ static_only=dict(default=False, type='bool'),
+ customer_gateway_id=dict(type='str'),
+ vpn_connection_id=dict(type='str'),
+ purge_tags=dict(type='bool', default=True),
+ routes=dict(type='list', default=[], elements='str'),
+ purge_routes=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ delay=dict(type='int', default=15),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10))
+
+ state = module.params.get('state')
+ parameters = dict(module.params)
+
+ try:
+ if state == 'present':
+ changed, response = ensure_present(connection, parameters, module.check_mode)
+ elif state == 'absent':
+ changed, response = ensure_absent(connection, parameters, module.check_mode)
+ except VPNConnectionException as e:
+ if e.exception:
+ module.fail_json_aws(e.exception, msg=e.msg)
+ else:
+ module.fail_json(msg=e.msg)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
new file mode 100644
index 000000000..c7a71f154
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_vpn_info
+version_added: 1.0.0
+short_description: Gather information about VPN Connections in AWS
+description:
+ - Gather information about VPN Connections in AWS.
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
+ required: false
+ type: dict
+ default: {}
+ vpn_connection_ids:
+ description:
+ - Get details of specific VPN connections using their connection IDs. This value should be provided as a list.
+ required: false
+ type: list
+ elements: str
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Gather information about all vpn connections
+ community.aws.ec2_vpc_vpn_info:
+
+- name: Gather information about a filtered list of vpn connections, based on tags
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ "tag:Name": test-connection
+ register: vpn_conn_info
+
+- name: Gather information about vpn connections, filtering by virtual private gateway ID.
+ community.aws.ec2_vpc_vpn_info:
+ filters:
+ vpn-gateway-id: vgw-cbe66beb
+ register: vpn_conn_info
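+
+# An illustrative example (hypothetical ID) using vpn_connection_ids directly:
+- name: Gather information about specific vpn connections by their IDs
+ community.aws.ec2_vpc_vpn_info:
+ vpn_connection_ids:
+ - vpn-XXXXXXXX
+ register: vpn_conn_info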
+'''
+
+RETURN = r'''
+vpn_connections:
+ description: List of one or more VPN Connections.
+ returned: always
+ type: complex
+ contains:
+ category:
+ description: The category of the VPN connection.
+ returned: always
+ type: str
+ sample: VPN
+ customer_gateway_configuration:
+ description: The configuration information for the VPN connection's customer gateway (in the native XML format).
+ returned: always
+ type: str
+ customer_gateway_id:
+ description: The ID of the customer gateway at your end of the VPN connection.
+ returned: always
+ type: str
+ sample: cgw-17a53c37
+ options:
+ description: The VPN connection options.
+ returned: always
+ type: dict
+ sample: {
+ "static_routes_only": false
+ }
+ routes:
+ description: List of static routes associated with the VPN connection.
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: The CIDR block associated with the local subnet of the customer data center.
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ state:
+ description: The current state of the static route.
+ returned: always
+ type: str
+ sample: available
+ state:
+ description: The current state of the VPN connection.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the VPN connection.
+ returned: always
+ type: dict
+ sample: {
+ "Name": "test-conn"
+ }
+ type:
+ description: The type of VPN connection.
+ returned: always
+ type: str
+ sample: ipsec.1
+ vgw_telemetry:
+ description: Information about the VPN tunnel.
+ returned: always
+ type: complex
+ contains:
+ accepted_route_count:
+ description: The number of accepted routes.
+ returned: always
+ type: int
+ sample: 0
+ last_status_change:
+ description: The date and time of the last change in status.
+ returned: always
+ type: str
+ sample: "2018-02-09T14:35:27+00:00"
+ outside_ip_address:
+ description: The Internet-routable IP address of the virtual private gateway's outside interface.
+ returned: always
+ type: str
+ sample: 13.127.79.191
+ status:
+ description: The status of the VPN tunnel.
+ returned: always
+ type: str
+ sample: DOWN
+ status_message:
+ description: If an error occurs, a description of the error.
+ returned: always
+ type: str
+ sample: IPSEC IS DOWN
+ certificate_arn:
+ description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
+ returned: when a private certificate is used for authentication
+ type: str
+ sample: "arn:aws:acm:us-east-1:123456789012:certificate/c544d8ce-20b8-4fff-98b0-example"
+ vpn_connection_id:
+ description: The ID of the VPN connection.
+ returned: always
+ type: str
+ sample: vpn-f700d5c0
+ vpn_gateway_id:
+ description: The ID of the virtual private gateway at the AWS side of the VPN connection.
+ returned: always
+ type: str
+ sample: vgw-cbe56bfb
+'''
+
+import json
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ )
+
+
+def date_handler(obj):
+ return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
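+# e.g. date_handler(datetime(2018, 2, 9, 14, 35, 27)) -> '2018-02-09T14:35:27',
+# while non-datetime values pass through unchanged; this lets json.dumps in
+# list_vpn_connections below serialize the timestamps boto3 returns.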
+
+def list_vpn_connections(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
+
+ try:
+ result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Cannot validate JSON data")
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe customer gateways")
+ snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
+ if snaked_vpn_connections:
+ for vpn_connection in snaked_vpn_connections:
+ vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
+ module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
+
+
+def main():
+
+ argument_spec = dict(
+ vpn_connection_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['vpn_connection_ids', 'filters']],
+ supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ list_vpn_connections(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_win_password.py b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
new file mode 100644
index 000000000..9b92c3e4f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_win_password
+version_added: 1.0.0
+short_description: Gets the default administrator password for EC2 Windows instances
+description:
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
+author: "Rick Mendes (@rickmendes)"
+options:
+ instance_id:
+ description:
+ - The instance id to get the password data from.
+ required: true
+ type: str
+ key_file:
+ description:
+ - Path to the file containing the key pair used on the instance.
+ - Conflicts with I(key_data).
+ required: false
+ type: path
+ key_data:
+ description:
+ - The private key (usually stored in vault).
+ - Conflicts with I(key_file).
+ required: false
+ type: str
+ key_passphrase:
+ description:
+ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
+ convert your password protected keys if they do not use DES or 3DES, for example C(openssl rsa -in current_key -out new_key -des3).
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the password to be available before returning.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - Number of seconds to wait before giving up.
+ default: 120
+ type: int
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+requirements:
+- cryptography
+'''
+
+EXAMPLES = '''
+# Example of getting a password
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+
+# Example of getting a password using a variable
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_data: "{{ ec2_private_key }}"
+
+# Example of getting a password with a password protected key
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_protected_test_key.pem"
+ key_passphrase: "secret"
+
+# Example of waiting for a password
+- name: get the Administrator password
+ community.aws.ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+ wait: true
+ wait_timeout: 45
+'''
+
+import datetime
+import time
+from base64 import b64decode
+
+try:
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def setup_module_object():
+ argument_spec = dict(
+ instance_id=dict(required=True),
+ key_file=dict(required=False, default=None, type='path'),
+ key_passphrase=dict(no_log=True, default=None, required=False),
+ key_data=dict(no_log=True, default=None, required=False),
+ wait=dict(type='bool', default=False, required=False),
+ wait_timeout=dict(default=120, required=False, type='int'),
+ )
+ mutually_exclusive = [['key_file', 'key_data']]
+ module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
+ return module
+
+
+def _get_password(module, client, instance_id):
+ try:
+ data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to get password data')
+ return data
+
+
+def ec2_win_password(module):
+ instance_id = module.params.get('instance_id')
+ key_file = module.params.get('key_file')
+ if module.params.get('key_passphrase') is None:
+ b_key_passphrase = None
+ else:
+ b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
+ if module.params.get('key_data') is None:
+ b_key_data = None
+ else:
+ b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ if wait:
+ start = datetime.datetime.now()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+
+ while datetime.datetime.now() < end:
+ data = _get_password(module, client, instance_id)
+ decoded = b64decode(data)
+ if not decoded:
+ time.sleep(5)
+ else:
+ break
+ else:
+ data = _get_password(module, client, instance_id)
+ decoded = b64decode(data)
+
+ if wait and datetime.datetime.now() >= end:
+ module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
+
+ if key_file is not None and b_key_data is None:
+ try:
+ with open(key_file, 'rb') as f:
+ key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
+ except IOError as e:
+ # Handle bad files
+ module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+ except (ValueError, TypeError) as e:
+ # Handle issues loading key
+ module.fail_json(msg="unable to parse key file")
+ elif b_key_data is not None and key_file is None:
+ try:
+ key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend())
+ except (ValueError, TypeError) as e:
+ module.fail_json(msg="unable to parse key data")
+ else:
+ # neither key_file nor key_data was supplied; fail early rather than
+ # raising a NameError when 'key' is used below
+ module.fail_json(msg="one of key_file or key_data must be specified")
+
+ try:
+ decrypted = key.decrypt(decoded, PKCS1v15())
+ except ValueError as e:
+ decrypted = None
+
+ if decrypted is None:
+ module.fail_json(msg="unable to decrypt password", win_password='', changed=False)
+ else:
+ if wait:
+ elapsed = datetime.datetime.now() - start
+ module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds)
+ else:
+ module.exit_json(win_password=decrypted, changed=False)
+
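+# Standalone sketch of the decryption pipeline used above (assumes an RSA key in
+# PEM form and base64-encoded password data; same cryptography calls as imported
+# at the top of this module):
+#   key = load_pem_private_key(pem_bytes, None, default_backend())
+#   password = key.decrypt(b64decode(password_data), PKCS1v15())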
+
+def main():
+ module = setup_module_object()
+
+ if not HAS_CRYPTOGRAPHY:
+ module.fail_json(msg='cryptography package required for this module.')
+
+ ec2_win_password(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_attribute.py b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
new file mode 100644
index 000000000..6efe701d1
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_attribute
+version_added: 1.0.0
+short_description: Manage ECS attributes
+description:
+ - Create, update or delete ECS container instance attributes.
+author: Andrej Svenke (@anryko)
+options:
+ cluster:
+ description:
+ - The short name or full Amazon Resource Name (ARN) of the cluster
+ that contains the resource to apply attributes.
+ required: true
+ type: str
+ state:
+ description:
+ - The desired state of the attributes.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ attributes:
+ description:
+ - List of attributes.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the attribute. Up to 128 letters (uppercase and lowercase),
+ numbers, hyphens, underscores, and periods are allowed.
+ required: true
+ type: str
+ value:
+ description:
+ - The value of the attribute. Up to 128 letters (uppercase and lowercase),
+ numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons,
+ and spaces are allowed.
+ required: false
+ type: str
+ ec2_instance_id:
+ description:
+ - EC2 instance ID of ECS cluster container instance.
+ required: true
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Set attributes
+ community.aws.ecs_attribute:
+ state: present
+ cluster: test-cluster
+ ec2_instance_id: "{{ ec2_id }}"
+ attributes:
+ - flavor: test
+ - migrated
+ delegate_to: localhost
+
+- name: Delete attributes
+ community.aws.ecs_attribute:
+ state: absent
+ cluster: test-cluster
+ ec2_instance_id: "{{ ec2_id }}"
+ attributes:
+ - flavor: test
+ - migrated
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+attributes:
+ description: attributes
+ type: complex
+ returned: always
+ contains:
+ cluster:
+ description: cluster name
+ type: str
+ ec2_instance_id:
+ description: ec2 instance id of ecs container instance
+ type: str
+ attributes:
+ description: list of attributes
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: name of the attribute
+ type: str
+ value:
+ description: value of the attribute
+ returned: if present
+ type: str
+'''
+
+try:
+ import botocore
+ from botocore.exceptions import ClientError, EndpointConnectionError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class EcsAttributes(object):
+ """Handles ECS Cluster Attribute"""
+
+ def __init__(self, module, attributes):
+ self.module = module
+ self.attributes = attributes if self._validate_attrs(attributes) else self._parse_attrs(attributes)
+
+ def __bool__(self):
+ return bool(self.attributes)
+
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self.attributes)
+
+ @staticmethod
+ def _validate_attrs(attrs):
+ return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs)
+
+ def _parse_attrs(self, attrs):
+ attrs_parsed = []
+ for attr in attrs:
+ if isinstance(attr, dict):
+ if len(attr) != 1:
+ self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr))
+ name, value = list(attr.items())[0]
+ attrs_parsed.append({'name': name, 'value': value})
+ elif isinstance(attr, str):
+ attrs_parsed.append({'name': attr, 'value': None})
+ else:
+ self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs))
+
+ return attrs_parsed
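+
+ # Illustration: [{'flavor': 'test'}, 'migrated'] parses to
+ # [{'name': 'flavor', 'value': 'test'}, {'name': 'migrated', 'value': None}].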
+
+ def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False):
+ attr_obj = {'targetType': 'container-instance',
+ 'targetId': ecs_arn,
+ 'name': name}
+ if not skip_value and value is not None:
+ attr_obj['value'] = value
+
+ return attr_obj
+
+ def get_for_ecs_arn(self, ecs_arn, skip_value=False):
+ """
+ Returns list of attribute dicts ready to be passed to boto3
+ attributes put/delete methods.
+ """
+ return [self._setup_attr_obj(ecs_arn, skip_value=skip_value, **attr) for attr in self.attributes]
+
+ def diff(self, attrs):
+ """
+ Returns EcsAttributes Object containing attributes which are present
+ in self but are absent in passed attrs (EcsAttributes Object).
+ """
+ attrs_diff = [attr for attr in self.attributes if attr not in attrs]
+ return EcsAttributes(self.module, attrs_diff)
+
+
+class Ec2EcsInstance(object):
+ """Handle ECS Cluster Remote Operations"""
+
+ def __init__(self, module, cluster, ec2_id):
+ self.module = module
+ self.cluster = cluster
+ self.ec2_id = ec2_id
+
+ try:
+ self.ecs = module.client('ecs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ self.ecs_arn = self._get_ecs_arn()
+
+ def _get_ecs_arn(self):
+ try:
+ ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns']
+ ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster,
+ containerInstances=ecs_instances_arns)['containerInstances']
+ except (ClientError, EndpointConnectionError) as e:
+ self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+
+ try:
+ ecs_arn = next(inst for inst in ec2_instances
+ if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn']
+ except StopIteration:
+ self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster))
+
+ return ecs_arn
+
+ def attrs_put(self, attrs):
+ """Puts attributes on ECS container instance"""
+ try:
+ self.ecs.put_attributes(cluster=self.cluster,
+ attributes=attrs.get_for_ecs_arn(self.ecs_arn))
+ except ClientError as e:
+ self.module.fail_json(msg=str(e))
+
+ def attrs_delete(self, attrs):
+ """Deletes attributes from ECS container instance."""
+ try:
+ self.ecs.delete_attributes(cluster=self.cluster,
+ attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True))
+ except ClientError as e:
+ self.module.fail_json(msg=str(e))
+
+ def attrs_get_by_name(self, attrs):
+ """
+ Returns EcsAttributes object containing attributes from ECS container instance with names
+ matching to attrs.attributes (EcsAttributes Object).
+ """
+ attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']}
+ for attr in attrs]
+
+ try:
+ matched_ecs_targets = [attr_found for attr_obj in attr_objs
+ for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']]
+ except ClientError as e:
+ self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+
+ matched_objs = [target for target in matched_ecs_targets
+ if target['targetId'] == self.ecs_arn]
+
+ results = [{'name': match['name'], 'value': match.get('value', None)}
+ for match in matched_objs]
+
+ return EcsAttributes(self.module, results)
+
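+# Note: list_attributes is queried per attribute name across the whole cluster,
+# then filtered down to the entries whose targetId matches this instance's ARN.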
+
+def main():
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ cluster=dict(required=True, type='str'),
+ ec2_instance_id=dict(required=True, type='str'),
+ attributes=dict(required=True, type='list', elements='dict'),
+ )
+
+ required_together = [['cluster', 'ec2_instance_id', 'attributes']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=required_together,
+ )
+
+ cluster = module.params['cluster']
+ ec2_instance_id = module.params['ec2_instance_id']
+ attributes = module.params['attributes']
+
+ conti = Ec2EcsInstance(module, cluster, ec2_instance_id)
+ attrs = EcsAttributes(module, attributes)
+
+ results = {'changed': False,
+ 'attributes': [
+ {'cluster': cluster,
+ 'ec2_instance_id': ec2_instance_id,
+ 'attributes': attributes}
+ ]}
+
+ attrs_present = conti.attrs_get_by_name(attrs)
+
+ if module.params['state'] == 'present':
+ attrs_diff = attrs.diff(attrs_present)
+ if not attrs_diff:
+ module.exit_json(**results)
+
+ conti.attrs_put(attrs_diff)
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if not attrs_present:
+ module.exit_json(**results)
+
+ conti.attrs_delete(attrs_present)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_cluster.py b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
new file mode 100644
index 000000000..347e2173e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_cluster
+version_added: 1.0.0
+short_description: Create or terminate ECS clusters
+notes:
+ - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
+ - With I(state=has_instances), the module will wait for the cluster to have instances registered to it.
+description:
+ - Creates or terminates ECS clusters.
+author: Mark Chance (@Java1Guy)
+options:
+ state:
+ description:
+ - The desired state of the cluster.
+ required: true
+ choices: ['present', 'absent', 'has_instances']
+ type: str
+ name:
+ description:
+ - The cluster name.
+ required: true
+ type: str
+ delay:
+ description:
+ - Number of seconds to wait.
+ required: false
+ type: int
+ default: 10
+ repeat:
+ description:
+ - The number of times to wait for the cluster to have an instance.
+ required: false
+ type: int
+ default: 10
+ capacity_providers:
+ version_added: 5.2.0
+ description:
+ - List of capacity providers to use for the cluster.
+ required: false
+ type: list
+ elements: str
+ capacity_provider_strategy:
+ version_added: 5.2.0
+ description:
+ - List of capacity provider strategies to use for the cluster.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ capacity_provider:
+ description:
+ - Name of capacity provider.
+ type: str
+ weight:
+ description:
+ - The relative percentage of the total number of launched tasks that should use the specified provider.
+ type: int
+ base:
+ description:
+ - How many tasks, at a minimum, should use the specified provider.
+ type: int
+ default: 0
+ purge_capacity_providers:
+ version_added: 5.2.0
+ description:
+ - Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility.
+ - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true).
+ required: false
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Cluster creation
+ community.aws.ecs_cluster:
+ name: default
+ state: present
+
+- name: Cluster creation with capacity providers and strategies.
+ community.aws.ecs_cluster:
+ name: default
+ state: present
+ capacity_providers:
+ - FARGATE
+ - FARGATE_SPOT
+ capacity_provider_strategy:
+ - capacity_provider: FARGATE
+ base: 1
+ weight: 1
+ - capacity_provider: FARGATE_SPOT
+ weight: 100
+ purge_capacity_providers: True
+
+- name: Cluster deletion
+ community.aws.ecs_cluster:
+ name: default
+ state: absent
+
+- name: Wait for register
+ community.aws.ecs_cluster:
+ name: "{{ new_cluster }}"
+ state: has_instances
+ delay: 10
+ repeat: 10
+ register: task_output
+
+'''
+RETURN = '''
+activeServicesCount:
+ description: how many services are active in this cluster
+ returned: 0 if a new cluster
+ type: int
+capacityProviders:
+ version_added: 5.2.0
+ description: list of capacity providers used in this cluster
+ returned: always
+ type: list
+defaultCapacityProviderStrategy:
+ version_added: 5.2.0
+ description: list of capacity provider strategies used in this cluster
+ returned: always
+ type: list
+clusterArn:
+ description: the ARN of the cluster just created
+ type: str
+ returned: 0 if a new cluster
+ sample: arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster
+clusterName:
+ description: name of the cluster just created (should match the input argument)
+ type: str
+ returned: always
+ sample: test-cluster
+pendingTasksCount:
+ description: how many tasks are waiting to run in this cluster
+ returned: 0 if a new cluster
+ type: int
+registeredContainerInstancesCount:
+ description: how many container instances are available in this cluster
+ returned: 0 if a new cluster
+ type: int
+runningTasksCount:
+ description: how many tasks are running in this cluster
+ returned: 0 if a new cluster
+ type: int
+status:
+ description: the status of the new cluster
+ returned: always
+ type: str
+ sample: ACTIVE
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+class EcsClusterManager:
+ """Handles ECS Clusters"""
+
+ def __init__(self, module):
+ self.module = module
+ try:
+ self.ecs = module.client('ecs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
+ for c in array_of_clusters:
+ if c[field_name].endswith(cluster_name):
+ return c
+ return None
+
+ def describe_cluster(self, cluster_name):
+ response = self.ecs.describe_clusters(clusters=[
+ cluster_name
+ ])
+ if len(response['failures']) > 0:
+ c = self.find_in_array(response['failures'], cluster_name, 'arn')
+ if c and c['reason'] == 'MISSING':
+ return None
+ # fall thru and look through found ones
+ if len(response['clusters']) > 0:
+ c = self.find_in_array(response['clusters'], cluster_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing cluster %s." % cluster_name)
+
+ def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy):
+ params = dict(clusterName=cluster_name)
+ if capacity_providers:
+ params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers)
+ if capacity_provider_strategy:
+ params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy)
+ response = self.ecs.create_cluster(**params)
+ return response['cluster']
+
+ def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy):
+ params = dict(cluster=cluster_name)
+ if capacity_providers:
+ params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers)
+ else:
+ params['capacityProviders'] = []
+ if capacity_provider_strategy:
+ params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy)
+ else:
+ params['defaultCapacityProviderStrategy'] = []
+ response = self.ecs.put_cluster_capacity_providers(**params)
+ return response['cluster']
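+
+ # put_cluster_capacity_providers replaces both the provider list and the default
+ # strategy wholesale, which is why empty lists are sent explicitly above to
+ # detach providers or strategies that are being purged.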
+
+ def delete_cluster(self, clusterName):
+ return self.ecs.delete_cluster(cluster=clusterName)
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'has_instances']),
+ name=dict(required=True, type='str'),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10),
+ purge_capacity_providers=dict(required=False, type='bool', default=False),
+ capacity_providers=dict(required=False, type='list', elements='str'),
+ capacity_provider_strategy=dict(required=False,
+ type='list',
+ elements='dict',
+ options=dict(capacity_provider=dict(type='str'),
+ weight=dict(type='int'),
+ base=dict(type='int', default=0)
+ )
+ ),
+ )
+ required_together = [['state', 'name']]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=required_together,
+ )
+
+ cluster_mgr = EcsClusterManager(module)
+ try:
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ except Exception as e:
+ module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
+
+ results = dict(changed=False)
+ if module.params['state'] == 'present':
+ # Pull requested and existing capacity providers and strategies.
+ purge_capacity_providers = module.params['purge_capacity_providers']
+ requested_cp = module.params['capacity_providers']
+ requested_cps = module.params['capacity_provider_strategy']
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
+ existing_cp = existing['capacityProviders']
+ existing_cps = existing['defaultCapacityProviderStrategy']
+
+ if requested_cp is None:
+ requested_cp = []
+
+ # Check if capacity provider strategy needs to trigger an update.
+ cps_update_needed = False
+ if requested_cps is not None:
+ for strategy in requested_cps:
+ if snake_dict_to_camel_dict(strategy) not in existing_cps:
+ cps_update_needed = True
+ for strategy in existing_cps:
+ if camel_dict_to_snake_dict(strategy) not in requested_cps:
+ cps_update_needed = True
+ elif requested_cps is None and existing_cps != []:
+ cps_update_needed = True
+
+ # Unless purge_capacity_providers is true, we will not be updating the providers or strategy.
+ if not purge_capacity_providers:
+ module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.'
+ ' To maintain the existing behaviour explicitly set purge_capacity_providers=true',
+ date='2024-06-01', collection_name='community.aws')
+ cps_update_needed = False
+ requested_cp = existing_cp
+ requested_cps = existing_cps
+
+ # If either the providers or strategy differ, update the cluster.
+ if requested_cp != existing_cp or cps_update_needed:
+ if not module.check_mode:
+ results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'],
+ capacity_providers=requested_cp,
+ capacity_provider_strategy=requested_cps)
+ results['changed'] = True
+ else:
+ results['cluster'] = existing
+ else:
+ if not module.check_mode:
+ # doesn't exist. create it.
+ results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'],
+ capacity_providers=requested_cp,
+ capacity_provider_strategy=requested_cps)
+ results['changed'] = True
+
+ # delete the cluster
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+ # it exists, so we should delete it and mark changed.
+ # return info about the cluster deleted
+ results['cluster'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ cluster_mgr.delete_cluster(module.params['name'])
+ results['changed'] = True
+ elif module.params['state'] == 'has_instances':
+ if not existing:
+ module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
+ return
+ # the cluster exists; poll until at least one container instance is registered
+ delay = module.params['delay']
+ repeat = module.params['repeat']
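+ # The overall wait is bounded by delay * (repeat + 1) seconds: one initial
+ # sleep plus up to `repeat` describe/sleep cycles.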
+ time.sleep(delay)
+ count = 0
+ for i in range(repeat):
+ existing = cluster_mgr.describe_cluster(module.params['name'])
+ count = existing['registeredContainerInstancesCount']
+ if count > 0:
+ results['changed'] = True
+ break
+ time.sleep(delay)
+ if count == 0 and i == repeat - 1:
+ module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+ return
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_ecr.py b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
new file mode 100644
index 000000000..d83d5af2e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_ecr
+version_added: 1.0.0
+short_description: Manage Elastic Container Registry repositories
+description:
+ - Manage Elastic Container Registry repositories.
+options:
+ name:
+ description:
+ - The name of the repository.
+ required: true
+ type: str
+ registry_id:
+ description:
+ - AWS account id associated with the registry.
+ - If not specified, the default registry is assumed.
+ required: false
+ type: str
+ policy:
+ description:
+ - JSON or dict that represents the new policy.
+ required: false
+ type: json
+ force_absent:
+ description:
+ - If I(force_absent=true), the repository will be removed, even if images are present.
+ required: false
+ default: false
+ type: bool
+ version_added: 4.1.0
+ force_set_policy:
+ description:
+ - If I(force_set_policy=false), it is not possible to set a policy that would prevent you
+ from setting another policy in the future.
+ required: false
+ default: false
+ type: bool
+ purge_policy:
+ description:
+ - If C(true), remove the policy from the repository.
+ - Defaults to C(false).
+ required: false
+ type: bool
+ image_tag_mutability:
+ description:
+ - Configure whether the repository should be mutable (i.e. an already existing tag can be overwritten) or not.
+ required: false
+ choices: [mutable, immutable]
+ default: 'mutable'
+ type: str
+ lifecycle_policy:
+ description:
+ - JSON or dict that represents the new lifecycle policy.
+ required: false
+ type: json
+ purge_lifecycle_policy:
+ description:
+ - If C(true), remove the lifecycle policy from the repository.
+ - Defaults to C(false).
+ required: false
+ type: bool
+ state:
+ description:
+ - Create or destroy the repository.
+ required: false
+ choices: [present, absent]
+ default: 'present'
+ type: str
+ scan_on_push:
+ description:
+ - If C(true), images are scanned for known vulnerabilities after being pushed to the repository.
+ required: false
+ default: false
+ type: bool
+ version_added: 1.3.0
+ encryption_configuration:
+ description:
+ - The encryption configuration for the repository.
+ required: false
+ suboptions:
+ encryption_type:
+ description:
+ - The encryption type to use.
+ choices: [AES256, KMS]
+ default: 'AES256'
+ type: str
+ kms_key:
+ description:
+ - If I(encryption_type=KMS), specify the KMS key to use for encryption.
+ - The alias, key ID, or full ARN of the KMS key can be specified.
+ type: str
+ type: dict
+ version_added: 5.2.0
+author:
+ - David M. Lee (@leedm777)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# If the repository does not exist, it is created. If it does exist, any
+# policies already on it are left unchanged.
+- name: ecr-repo
+ community.aws.ecs_ecr:
+ name: super/cool
+
+- name: destroy-ecr-repo
+ community.aws.ecs_ecr:
+ name: old/busted
+ state: absent
+
+- name: Cross account ecr-repo
+ community.aws.ecs_ecr:
+ registry_id: 123456789012
+ name: cross/account
+
+- name: set-policy as object
+ community.aws.ecs_ecr:
+ name: needs-policy-object
+ policy:
+ Version: '2008-10-17'
+ Statement:
+ - Sid: read-only
+ Effect: Allow
+ Principal:
+ AWS: '{{ read_only_arn }}'
+ Action:
+ - ecr:GetDownloadUrlForLayer
+ - ecr:BatchGetImage
+ - ecr:BatchCheckLayerAvailability
+
+- name: set-policy as string
+ community.aws.ecs_ecr:
+ name: needs-policy-string
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+
+- name: delete-policy
+ community.aws.ecs_ecr:
+ name: needs-no-policy
+ purge_policy: true
+
+- name: create immutable ecr-repo
+ community.aws.ecs_ecr:
+ name: super/cool
+ image_tag_mutability: immutable
+
+- name: set-lifecycle-policy
+ community.aws.ecs_ecr:
+ name: needs-lifecycle-policy
+ scan_on_push: true
+ lifecycle_policy:
+ rules:
+ - rulePriority: 1
+ description: new policy
+ selection:
+ tagStatus: untagged
+ countType: sinceImagePushed
+ countUnit: days
+ countNumber: 365
+ action:
+ type: expire
+
+- name: purge-lifecycle-policy
+ community.aws.ecs_ecr:
+ name: needs-no-lifecycle-policy
+ purge_lifecycle_policy: true
+
+- name: set-encryption-configuration
+ community.aws.ecs_ecr:
+ name: uses-custom-kms-key
+ encryption_configuration:
+ encryption_type: KMS
+ kms_key: custom-kms-key-alias
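+
+# Force-delete a repository even if it still contains images (illustrative
+# values; force_absent is documented in the options above)
+- name: force-destroy-ecr-repo
+ community.aws.ecs_ecr:
+ name: old/busted
+ state: absent
+ force_absent: true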
+'''
+
+RETURN = '''
+state:
+ type: str
+ description: The asserted state of the repository (present, absent)
+ returned: always
+created:
+ type: bool
+ description: If true, the repository was created
+ returned: always
+name:
+ type: str
+ description: The name of the repository
+ returned: I(state=absent)
+policy:
+ type: dict
+ description: The existing, created or updated repository policy.
+ returned: I(state=present)
+ version_added: 4.0.0
+repository:
+ type: dict
+ description: The created or updated repository
+ returned: I(state=present)
+ sample:
+ createdAt: '2017-01-17T08:41:32-06:00'
+ registryId: '123456789012'
+ repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090
+ repositoryName: ecr-test-1484664090
+ repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
+'''
+
+import json
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict
+
+
+def build_kwargs(registry_id):
+ """
+ Builds a kwargs dict which may contain the optional registryId.
+
+ :param registry_id: Optional string containing the registryId.
+ :return: kwargs dict with registryId, if given
+ """
+ if not registry_id:
+ return dict()
+ else:
+ return dict(registryId=registry_id)
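+
+# Illustrative behaviour of build_kwargs():
+# build_kwargs(None) -> {}
+# build_kwargs('123456789012') -> {'registryId': '123456789012'}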
+
+
+class EcsEcr:
+ def __init__(self, module):
+ self.ecr = module.client('ecr')
+ self.sts = module.client('sts')
+ self.check_mode = module.check_mode
+ self.changed = False
+ self.skipped = False
+
+ def get_repository(self, registry_id, name):
+ try:
+ res = self.ecr.describe_repositories(
+ repositoryNames=[name], **build_kwargs(registry_id))
+ repos = res.get('repositories')
+ return repos and repos[0]
+ except is_boto3_error_code('RepositoryNotFoundException'):
+ return None
+
+ def get_repository_policy(self, registry_id, name):
+ try:
+ res = self.ecr.get_repository_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ text = res.get('policyText')
+ return text and json.loads(text)
+ except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']):
+ return None
+
+ def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration):
+ if registry_id:
+ default_registry_id = self.sts.get_caller_identity().get('Account')
+ if registry_id != default_registry_id:
+ raise Exception('Cannot create repository in registry {0}. '
+ 'Would be created in {1} instead.'.format(registry_id, default_registry_id))
+
+ if encryption_configuration is None:
+ encryption_configuration = dict(encryptionType='AES256')
+
+ if not self.check_mode:
+ repo = self.ecr.create_repository(
+ repositoryName=name,
+ imageTagMutability=image_tag_mutability,
+ encryptionConfiguration=encryption_configuration).get('repository')
+ self.changed = True
+ return repo
+ else:
+ self.skipped = True
+ return dict(repositoryName=name)
+
+ def set_repository_policy(self, registry_id, name, policy_text, force):
+ if not self.check_mode:
+ policy = self.ecr.set_repository_policy(
+ repositoryName=name,
+ policyText=policy_text,
+ force=force,
+ **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ self.skipped = True
+ if self.get_repository(registry_id, name) is None:
+ printable = name
+ if registry_id:
+ printable = '{0}:{1}'.format(registry_id, name)
+ raise Exception(
+ 'could not find repository {0}'.format(printable))
+ return
+
+ def delete_repository(self, registry_id, name, force):
+ if not self.check_mode:
+ repo = self.ecr.delete_repository(
+ repositoryName=name, force=force, **build_kwargs(registry_id))
+ self.changed = True
+ return repo
+ else:
+ repo = self.get_repository(registry_id, name)
+ if repo:
+ self.skipped = True
+ return repo
+ return None
+
+ def delete_repository_policy(self, registry_id, name):
+ if not self.check_mode:
+ policy = self.ecr.delete_repository_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ policy = self.get_repository_policy(registry_id, name)
+ if policy:
+ self.skipped = True
+ return policy
+ return None
+
+ def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration):
+ repo = self.get_repository(registry_id, name)
+ current_mutability_configuration = repo.get('imageTagMutability')
+
+ if current_mutability_configuration != new_mutability_configuration:
+ if not self.check_mode:
+ self.ecr.put_image_tag_mutability(
+ repositoryName=name,
+ imageTagMutability=new_mutability_configuration,
+ **build_kwargs(registry_id))
+ else:
+ self.skipped = True
+ self.changed = True
+
+ repo['imageTagMutability'] = new_mutability_configuration
+ return repo
+
+ def get_lifecycle_policy(self, registry_id, name):
+ try:
+ res = self.ecr.get_lifecycle_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ text = res.get('lifecyclePolicyText')
+ return text and json.loads(text)
+ except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']):
+ return None
+
+ def put_lifecycle_policy(self, registry_id, name, policy_text):
+ if not self.check_mode:
+ policy = self.ecr.put_lifecycle_policy(
+ repositoryName=name,
+ lifecyclePolicyText=policy_text,
+ **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ self.skipped = True
+ if self.get_repository(registry_id, name) is None:
+ printable = name
+ if registry_id:
+ printable = '{0}:{1}'.format(registry_id, name)
+ raise Exception(
+ 'could not find repository {0}'.format(printable))
+ return
+
+ def purge_lifecycle_policy(self, registry_id, name):
+ if not self.check_mode:
+ policy = self.ecr.delete_lifecycle_policy(
+ repositoryName=name, **build_kwargs(registry_id))
+ self.changed = True
+ return policy
+ else:
+ policy = self.get_lifecycle_policy(registry_id, name)
+ if policy:
+ self.skipped = True
+ return policy
+ return None
+
+ def put_image_scanning_configuration(self, registry_id, name, scan_on_push):
+ if not self.check_mode:
+ if registry_id:
+ scan = self.ecr.put_image_scanning_configuration(
+ registryId=registry_id,
+ repositoryName=name,
+ imageScanningConfiguration={'scanOnPush': scan_on_push}
+ )
+ else:
+ scan = self.ecr.put_image_scanning_configuration(
+ repositoryName=name,
+ imageScanningConfiguration={'scanOnPush': scan_on_push}
+ )
+ self.changed = True
+ return scan
+ else:
+ self.skipped = True
+ return None
+
+
+def sort_lists_of_strings(policy):
+ for statement_index in range(0, len(policy.get('Statement', []))):
+ for key in policy['Statement'][statement_index]:
+ value = policy['Statement'][statement_index][key]
+ if isinstance(value, list) and all(isinstance(item, string_types) for item in value):
+ policy['Statement'][statement_index][key] = sorted(value)
+ return policy
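+
+# Illustrative behaviour: string-only lists inside each statement are sorted so
+# that logically equal policies compare equal, e.g.
+# {'Statement': [{'Action': ['ecr:B', 'ecr:A']}]}
+# -> {'Statement': [{'Action': ['ecr:A', 'ecr:B']}]}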
+
+
+def run(ecr, params):
+ # type: (EcsEcr, dict) -> Tuple[bool, dict]
+ result = {}
+ try:
+ name = params['name']
+ state = params['state']
+ policy_text = params['policy']
+ purge_policy = params['purge_policy']
+ force_absent = params['force_absent']
+ registry_id = params['registry_id']
+ force_set_policy = params['force_set_policy']
+ image_tag_mutability = params['image_tag_mutability'].upper()
+ lifecycle_policy_text = params['lifecycle_policy']
+ purge_lifecycle_policy = params['purge_lifecycle_policy']
+ scan_on_push = params['scan_on_push']
+ encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration'])
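+ # e.g. {'encryption_type': 'KMS', 'kms_key': 'alias/my-key'} becomes
+ # {'encryptionType': 'KMS', 'kmsKey': 'alias/my-key'} for the boto3 call.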
+
+ # Parse policies, if they are given
+ try:
+ policy = policy_text and json.loads(policy_text)
+ except ValueError:
+ result['policy'] = policy_text
+ result['msg'] = 'Could not parse policy'
+ return False, result
+
+ try:
+ lifecycle_policy = \
+ lifecycle_policy_text and json.loads(lifecycle_policy_text)
+ except ValueError:
+ result['lifecycle_policy'] = lifecycle_policy_text
+ result['msg'] = 'Could not parse lifecycle_policy'
+ return False, result
+
+ result['state'] = state
+ result['created'] = False
+
+ repo = ecr.get_repository(registry_id, name)
+
+ if state == 'present':
+ result['created'] = False
+
+ if not repo:
+ repo = ecr.create_repository(
+ registry_id, name, image_tag_mutability, encryption_configuration)
+ result['changed'] = True
+ result['created'] = True
+ else:
+ if encryption_configuration is not None:
+ if repo.get('encryptionConfiguration') != encryption_configuration:
+ result['msg'] = 'Cannot modify repository encryption type'
+ return False, result
+
+ repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability)
+ result['repository'] = repo
+
+ if purge_lifecycle_policy:
+ original_lifecycle_policy = \
+ ecr.get_lifecycle_policy(registry_id, name)
+
+ result['lifecycle_policy'] = None
+
+ if original_lifecycle_policy:
+ ecr.purge_lifecycle_policy(registry_id, name)
+ result['changed'] = True
+
+ elif lifecycle_policy_text is not None:
+ try:
+ lifecycle_policy = sort_json_policy_dict(lifecycle_policy)
+ result['lifecycle_policy'] = lifecycle_policy
+
+ original_lifecycle_policy = ecr.get_lifecycle_policy(
+ registry_id, name)
+
+ if original_lifecycle_policy:
+ original_lifecycle_policy = sort_json_policy_dict(
+ original_lifecycle_policy)
+
+ if original_lifecycle_policy != lifecycle_policy:
+ ecr.put_lifecycle_policy(registry_id, name,
+ lifecycle_policy_text)
+ result['changed'] = True
+ except Exception:
+ # Some failure w/ the policy. It's helpful to know what the
+ # policy is.
+ result['lifecycle_policy'] = lifecycle_policy_text
+ raise
+
+ if purge_policy:
+ original_policy = ecr.get_repository_policy(registry_id, name)
+
+ result['policy'] = None
+
+ if original_policy:
+ ecr.delete_repository_policy(registry_id, name)
+ result['changed'] = True
+
+ elif policy_text is not None:
+ try:
+ # Sort any lists containing only string types
+ policy = sort_lists_of_strings(policy)
+
+ result['policy'] = policy
+
+ original_policy = ecr.get_repository_policy(
+ registry_id, name)
+ if original_policy:
+ original_policy = sort_lists_of_strings(original_policy)
+
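+ # compare_policies() returns True when the two policies differ, so a
+ # truthy result means the repository policy needs to be updated.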
+ if compare_policies(original_policy, policy):
+ ecr.set_repository_policy(
+ registry_id, name, policy_text, force_set_policy)
+ result['changed'] = True
+ except Exception:
+ # Some failure w/ the policy. It's helpful to know what the
+ # policy is.
+ result['policy'] = policy_text
+ raise
+
+ else:
+ original_policy = ecr.get_repository_policy(registry_id, name)
+ if original_policy:
+ result['policy'] = original_policy
+
+ original_scan_on_push = ecr.get_repository(registry_id, name)
+ if original_scan_on_push is not None:
+ if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']:
+ result['changed'] = True
+ result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push
+ response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push)
+
+ elif state == 'absent':
+ result['name'] = name
+ if repo:
+ ecr.delete_repository(registry_id, name, force_absent)
+ result['changed'] = True
+
+ except Exception as err:
+ msg = str(err)
+ if isinstance(err, botocore.exceptions.ClientError):
+ msg = boto_exception(err)
+ result['msg'] = msg
+ result['exception'] = traceback.format_exc()
+ return False, result
+
+ if ecr.skipped:
+ result['skipped'] = True
+
+ if ecr.changed:
+ result['changed'] = True
+
+ return True, result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ registry_id=dict(required=False),
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ force_absent=dict(required=False, type='bool', default=False),
+ force_set_policy=dict(required=False, type='bool', default=False),
+ policy=dict(required=False, type='json'),
+ image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'],
+ default='mutable'),
+ purge_policy=dict(required=False, type='bool'),
+ lifecycle_policy=dict(required=False, type='json'),
+ purge_lifecycle_policy=dict(required=False, type='bool'),
+ scan_on_push=(dict(required=False, type='bool', default=False)),
+ encryption_configuration=dict(
+ required=False,
+ type='dict',
+ options=dict(
+ encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']),
+ kms_key=dict(required=False, type='str', no_log=False),
+ ),
+ required_if=[
+ ['encryption_type', 'KMS', ['kms_key']],
+ ],
+ ),
+ )
+ mutually_exclusive = [
+ ['policy', 'purge_policy'],
+ ['lifecycle_policy', 'purge_lifecycle_policy']]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+
+ ecr = EcsEcr(module)
+ passed, result = run(ecr, module.params)
+
+ if passed:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service.py b/ansible_collections/community/aws/plugins/modules/ecs_service.py
new file mode 100644
index 000000000..2d86a6bd5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_service.py
@@ -0,0 +1,1253 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ecs_service
+version_added: 1.0.0
+short_description: Create, terminate, start or stop a service in ECS
+description:
+ - Creates or terminates ECS services.
+notes:
+ - The service role specified must be assumable, i.e. it must have a trust relationship for the ECS service (ecs.amazonaws.com).
+ - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html).
+ - An IAM role must have been previously created.
+author:
+ - "Mark Chance (@Java1Guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+ - "Stephane Maarek (@simplesteph)"
+ - "Zac Blazic (@zacblazic)"
+options:
+ state:
+ description:
+ - The desired state of the service.
+ required: true
+ choices: ["present", "absent", "deleting"]
+ type: str
+ name:
+ description:
+ - The name of the service.
+ required: true
+ type: str
+ aliases: ['service']
+ cluster:
+ description:
+ - The name of the cluster in which the service exists.
+ - If not specified, the cluster name will be C(default).
+ required: false
+ type: str
+ default: 'default'
+ task_definition:
+ description:
+ - The task definition the service will run.
+ - This parameter is required when I(state=present) unless I(force_new_deployment=True).
+ - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case
+ the task definition is managed by Code Pipeline and cannot be updated.
+ required: false
+ type: str
+ load_balancers:
+ description:
+ - The list of ELBs defined for this service.
+ - Load balancers for an existing service cannot be updated, and it is an error to do so.
+ - When the deployment controller is C(CODE_DEPLOY), changes to this value are ignored and do not cause an error.
+ required: false
+ type: list
+ elements: dict
+ default: []
+ desired_count:
+ description:
+ - The number of instances of the service to place and keep running.
+ - This parameter is required when I(state=present).
+ required: false
+ type: int
+ client_token:
+ description:
+ - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
+ required: false
+ type: str
+ default: ''
+ role:
+ description:
+ - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
+ on your behalf.
+ - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc).
+ required: false
+ type: str
+ default: ''
+ delay:
+ description:
+ - The time to wait before checking that the service is available.
+ required: false
+ default: 10
+ type: int
+ repeat:
+ description:
+ - The number of times to check that the service is available.
+ required: false
+ default: 10
+ type: int
+ force_new_deployment:
+ description:
+ - Force deployment of service even if there are no changes.
+ required: false
+ type: bool
+ default: false
+ deployment_controller:
+ description:
+ - The deployment controller to use for the service. If no deployment controller is specified, the ECS controller is used.
+ required: false
+ version_added: 4.1.0
+ type: dict
+ default: {}
+ suboptions:
+ type:
+ type: str
+ choices: ["ECS", "CODE_DEPLOY", "EXTERNAL"]
+ description: The deployment controller type to use.
+ deployment_configuration:
+ description:
+ - Optional parameters that control the deployment_configuration.
+ - Format is C({"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}).
+ required: false
+ type: dict
+ default: {}
+ suboptions:
+ maximum_percent:
+ type: int
+ description: Upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment.
+ minimum_healthy_percent:
+ type: int
+ description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment.
+ deployment_circuit_breaker:
+ type: dict
+ description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state.
+ suboptions:
+ enable:
+ type: bool
+ description: If enabled, a service deployment will transition to a failed state and stop launching new tasks.
+ rollback:
+ type: bool
+ description: If enabled, ECS will roll back your service to the last completed deployment after a failure.
+ enable_execute_command:
+ description:
+ - Whether or not to enable the execute command functionality for the containers in the ECS task.
+ - If I(enable_execute_command=true) execute command functionality is enabled on all containers in the ECS task.
+ required: false
+ type: bool
+ version_added: 5.4.0
+ placement_constraints:
+ description:
+ - The placement constraints for the tasks in the service.
+ - See U(https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementConstraint.html) for more details.
+ required: false
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ type:
+ description: The type of constraint.
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ required: false
+ type: str
+ purge_placement_constraints:
+ version_added: 5.3.0
+ description:
+ - Toggle overwriting of existing placement constraints. This is needed for backwards compatibility.
+ - By default I(purge_placement_constraints=false). In a release after 2024-06-01 this will be changed to I(purge_placement_constraints=true).
+ required: false
+ type: bool
+ default: false
+ placement_strategy:
+ description:
+ - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.
+ required: false
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ type:
+ description: The type of placement strategy.
+ type: str
+ field:
+ description: The field to apply the placement strategy against.
+ type: str
+ purge_placement_strategy:
+ version_added: 5.3.0
+ description:
+ - Toggle overwriting of existing placement strategy. This is needed for backwards compatibility.
+ - By default I(purge_placement_strategy=false). In a release after 2024-06-01 this will be changed to I(purge_placement_strategy=true).
+ required: false
+ type: bool
+ default: false
+ force_deletion:
+ description:
+ - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group.
+ default: False
+ type: bool
+ version_added: 2.1.0
+ network_configuration:
+ description:
+ - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
+ type: dict
+ suboptions:
+ subnets:
+ description:
+ - A list of subnet IDs to associate with the task.
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - A list of security group names or group IDs to associate with the task.
+ type: list
+ elements: str
+ assign_public_ip:
+ description:
+ - Whether the task's elastic network interface receives a public IP address.
+ type: bool
+ launch_type:
+ description:
+ - The launch type on which to run your service.
+ required: false
+ choices: ["EC2", "FARGATE"]
+ type: str
+ capacity_provider_strategy:
+ version_added: 4.0.0
+ description:
+ - The capacity provider strategy to use with your service. You can specify a maximum of 6 providers per strategy.
+ required: false
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ capacity_provider:
+ description:
+ - Name of capacity provider.
+ type: str
+ weight:
+ description:
+ - The relative percentage of the total number of launched tasks that should use the specified provider.
+ type: int
+ base:
+ description:
+ - How many tasks, at a minimum, should use the specified provider.
+ type: int
+ platform_version:
+ type: str
+ description:
+ - The numeric part of the platform version, or C(LATEST).
+ - See U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) for more details.
+ required: false
+ version_added: 1.5.0
+ health_check_grace_period_seconds:
+ description:
+ - Seconds to wait before health checking the freshly added/updated services.
+ required: false
+ type: int
+ service_registries:
+ description:
+ - Describes service discovery registries this service will register with.
+ type: list
+ elements: dict
+ default: []
+ required: false
+ suboptions:
+ container_name:
+ description:
+ - Container name for service discovery registration.
+ type: str
+ container_port:
+ description:
+ - Container port for service discovery registration.
+ type: int
+ arn:
+ description:
+ - Service discovery registry ARN.
+ type: str
+ scheduling_strategy:
+ description:
+ - The scheduling strategy.
+ - Defaults to C(REPLICA) if not given to preserve previous behavior.
+ required: false
+ choices: ["DAEMON", "REPLICA"]
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the service to be inactive.
+ - Waits only when I(state) is C(absent).
+ type: bool
+ default: false
+ version_added: 4.1.0
+ propagate_tags:
+ description:
+ - Propagate tags from the ECS task definition or ECS service to the ECS task.
+ required: false
+ choices: ["TASK_DEFINITION", "SERVICE"]
+ type: str
+ version_added: 4.1.0
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+ type: dict
+ required: false
+ version_added: 4.1.0
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Basic provisioning example
+- community.aws.ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 0
+
+- name: create ECS service on VPC network
+ community.aws.ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 0
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+
+# Simple example to delete
+- community.aws.ecs_service:
+ name: default
+ state: absent
+ cluster: new_cluster
+
+# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4)
+- community.aws.ecs_service:
+ state: present
+ name: test-service
+ cluster: test-cluster
+ task_definition: test-task-definition
+ desired_count: 3
+ deployment_configuration:
+ minimum_healthy_percent: 75
+ maximum_percent: 150
+ placement_constraints:
+ - type: memberOf
+ expression: 'attribute:flavor==test'
+ placement_strategy:
+ - type: binpack
+ field: memory
+
+# With deployment circuit breaker (added in version 4.0)
+- community.aws.ecs_service:
+ state: present
+ name: test-service
+ cluster: test-cluster
+ task_definition: test-task-definition
+ desired_count: 3
+ deployment_configuration:
+ deployment_circuit_breaker:
+ enable: True
+ rollback: True
+
+# With capacity_provider_strategy (added in version 4.0)
+- community.aws.ecs_service:
+ state: present
+ name: test-service
+ cluster: test-cluster
+ task_definition: test-task-definition
+ desired_count: 1
+ capacity_provider_strategy:
+ - capacity_provider: test-capacity-provider-1
+ weight: 1
+ base: 0
+
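+# With a load balancer (illustrative values: the load_balancers element keys
+# follow the ECS API, i.e. targetGroupArn or loadBalancerName, containerName
+# and containerPort; the role name here is an assumption for the example)
+- community.aws.ecs_service:
+ state: present
+ name: lb-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 1
+ role: ecsServiceRole
+ load_balancers:
+ - targetGroupArn: arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/abcdef0123456789
+ containerName: web
+ containerPort: 8080
+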
+# With tags and tag propagation
+- community.aws.ecs_service:
+ state: present
+ name: tags-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ desired_count: 1
+ tags:
+ Firstname: jane
+ lastName: doe
+ propagate_tags: SERVICE
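+
+# With a DAEMON scheduling strategy (illustrative; desired_count is omitted
+# because ECS manages the task count for DAEMON services)
+- community.aws.ecs_service:
+ state: present
+ name: daemon-test-service
+ cluster: new_cluster
+ task_definition: 'new_cluster-task:1'
+ scheduling_strategy: DAEMON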
+'''
+
+RETURN = r'''
+service:
+ description: Details of created service.
+ returned: when creating a service
+ type: complex
+ contains:
+ capacityProviderStrategy:
+ version_added: 4.0.0
+ description: The capacity provider strategy to use with your service.
+ returned: always
+ type: complex
+ contains:
+ base:
+ description: How many tasks, at a minimum, should use the specified provider.
+ returned: always
+ type: int
+ capacityProvider:
+ description: Name of capacity provider.
+ returned: always
+ type: str
+ weight:
+ description: The relative percentage of the total number of launched tasks that should use the specified provider.
+ returned: always
+ type: int
+ clusterArn:
+ description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description:
+ - A list of load balancer objects
+ - Updating the load balancer configuration of an existing service requires botocore>=1.24.14.
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description:
+ - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the C(arn:aws:ecs) namespace, followed by
+ the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name.
+ sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service'
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ tags:
+ description: The tags applied to this resource.
+ returned: success
+ type: dict
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ deploymentConfiguration:
+ description: dictionary of deploymentConfiguration
+ returned: always
+ type: complex
+ contains:
+ maximumPercent:
+ description: maximumPercent param
+ returned: always
+ type: int
+ minimumHealthyPercent:
+ description: minimumHealthyPercent param
+ returned: always
+ type: int
+ deploymentCircuitBreaker:
+ description: dictionary of deploymentCircuitBreaker
+ returned: always
+ type: complex
+ contains:
+ enable:
+ description: The state of the circuit breaker feature.
+ returned: always
+ type: bool
+ rollback:
+ description: The state of the rollback feature of the circuit breaker.
+ returned: always
+ type: bool
+ events:
+ description: list of service events
+ returned: always
+ type: list
+ elements: dict
+ placementConstraints:
+ description: List of placement constraints objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of constraint. Valid values are distinctInstance and memberOf.
+ returned: always
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is
+ distinctInstance.
+ returned: always
+ type: str
+ placementStrategy:
+ description: List of placement strategy objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of placement strategy. Valid values are random, spread and binpack.
+ returned: always
+ type: str
+ field:
+ description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
+ (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
+ such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
+ returned: always
+ type: str
+ propagateTags:
+ description: The type of tag propagation applied to the resource.
+ returned: always
+ type: str
+ansible_facts:
+ description: Facts about deleted service.
+ returned: when deleting a service
+ type: complex
+ contains:
+ service:
+ description: Details of deleted service.
+ returned: when service existed and was deleted
+ type: complex
+ contains:
+ clusterArn:
+ description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description:
+ - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+ of the service, the AWS account ID of the service owner, the service namespace, and then the service name.
+ sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service'
+ returned: always
+ type: str
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ tags:
+ description: The tags applied to this resource.
+ returned: when tags found
+ type: list
+ elements: dict
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list
+ elements: dict
+ deploymentConfiguration:
+ description: dictionary of deploymentConfiguration
+ returned: always
+ type: complex
+ contains:
+ maximumPercent:
+ description: maximumPercent param
+ returned: always
+ type: int
+ minimumHealthyPercent:
+ description: minimumHealthyPercent param
+ returned: always
+ type: int
+ deploymentCircuitBreaker:
+ description: dictionary of deploymentCircuitBreaker
+ returned: always
+ type: complex
+ contains:
+ enable:
+ description: The state of the circuit breaker feature.
+ returned: always
+ type: bool
+ rollback:
+ description: The state of the rollback feature of the circuit breaker.
+ returned: always
+ type: bool
+ events:
+ description: list of service events
+ returned: always
+ type: list
+ elements: dict
+ placementConstraints:
+ description: List of placement constraints objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of constraint. Valid values are distinctInstance and memberOf.
+ returned: always
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if
+ the constraint type is distinctInstance.
+ returned: always
+ type: str
+ placementStrategy:
+ description: List of placement strategy objects
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: The type of placement strategy. Valid values are random, spread and binpack.
+ returned: always
+ type: str
+ field:
+ description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
+ (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
+ such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
+ returned: always
+ type: str
+ propagateTags:
+ description: The type of tag propagation applied to the resource
+ returned: always
+ type: str
+
+'''
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+DEPLOYMENT_CONTROLLER_TYPE_MAP = {
+ 'type': 'str',
+}
+
+DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int',
+ 'deployment_circuit_breaker': 'dict',
+}
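+
+# map_complex_type() applies the type maps above to coerce user-supplied
+# suboption values, e.g. {'maximum_percent': '150'} -> {'maximum_percent': 150}
+# (an illustrative sketch of the behaviour, not an exhaustive description).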
+
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+ self.ec2 = module.client('ec2')
+
+ def format_network_configuration(self, network_config):
+ result = dict()
+ if network_config['subnets'] is not None:
+ result['subnets'] = network_config['subnets']
+ else:
+ self.module.fail_json(msg="Network configuration must include subnets")
+ if network_config['security_groups'] is not None:
+ groups = network_config['security_groups']
+ if any(not sg.startswith('sg-') for sg in groups):
+ try:
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't look up security groups")
+ result['securityGroups'] = groups
+ if network_config['assign_public_ip'] is not None:
+ if network_config['assign_public_ip'] is True:
+ result['assignPublicIp'] = "ENABLED"
+ else:
+ result['assignPublicIp'] = "DISABLED"
+ return dict(awsvpcConfiguration=result)
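+
+ # Illustrative mapping performed above (assuming the IDs resolve as-is):
+ # {'subnets': ['subnet-abcd1234'], 'security_groups': ['sg-aaaa1111'], 'assign_public_ip': True}
+ # -> {'awsvpcConfiguration': {'subnets': ['subnet-abcd1234'],
+ # 'securityGroups': ['sg-aaaa1111'], 'assignPublicIp': 'ENABLED'}}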
+
+ def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
+ for c in array_of_services:
+ if c[field_name].endswith(service_name):
+ return c
+ return None
+
+ def describe_service(self, cluster_name, service_name):
+ response = self.ecs.describe_services(
+ cluster=cluster_name,
+ services=[service_name],
+ include=['TAGS'],
+ )
+ msg = ''
+
+ if len(response['failures']) > 0:
+ c = self.find_in_array(response['failures'], service_name, 'arn')
+ # guard against find_in_array() returning None before dereferencing it
+ if c:
+ msg += ", failure reason is " + c['reason']
+ if c['reason'] == 'MISSING':
+ return None
+ # fall through and look through found ones
+ if len(response['services']) > 0:
+ c = self.find_in_array(response['services'], service_name)
+ if c:
+ return c
+ raise Exception("Unknown problem describing service %s." % service_name)
+
+ def is_matching_service(self, expected, existing):
+ # aws returns the arn of the task definition
+ # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3
+ # but the user is just entering
+ # ansible-fargate-nginx:3
+ if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]:
+ if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY':
+ return False
+
+ if expected.get('health_check_grace_period_seconds'):
+ if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'):
+ return False
+
+ if (expected['load_balancers'] or []) != existing['loadBalancers']:
+ return False
+
+ if (expected['propagate_tags'] or "NONE") != existing['propagateTags']:
+ return False
+
+ if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}):
+ return False
+
+ if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False):
+ return False
+
+ # expected is params. DAEMON scheduling strategy returns desired count equal to
+ # number of instances running; don't check desired count if scheduling strat is daemon
+ if (expected['scheduling_strategy'] != 'DAEMON'):
+ if (expected['desired_count'] or 0) != existing['desiredCount']:
+ return False
+
+ return True
+
+ def create_service(
+ self,
+ service_name,
+ cluster_name,
+ task_definition,
+ load_balancers,
+ desired_count,
+ client_token,
+ role,
+ deployment_controller,
+ deployment_configuration,
+ placement_constraints,
+ placement_strategy,
+ health_check_grace_period_seconds,
+ network_configuration,
+ service_registries,
+ launch_type,
+ platform_version,
+ scheduling_strategy,
+ capacity_provider_strategy,
+ tags,
+ propagate_tags,
+ enable_execute_command,
+ ):
+
+ params = dict(
+ cluster=cluster_name,
+ serviceName=service_name,
+ taskDefinition=task_definition,
+ loadBalancers=load_balancers,
+ clientToken=client_token,
+ role=role,
+ deploymentConfiguration=deployment_configuration,
+ placementStrategy=placement_strategy
+ )
+ if network_configuration:
+ params['networkConfiguration'] = network_configuration
+ if deployment_controller:
+ params['deploymentController'] = deployment_controller
+ if launch_type:
+ params['launchType'] = launch_type
+ if platform_version:
+ params['platformVersion'] = platform_version
+ if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
+ params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ if service_registries:
+ params['serviceRegistries'] = service_registries
+
+ # filter placement_constraint and left only those where value is not None
+ # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
+ if placement_constraints:
+ params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
+ for constraint in placement_constraints]
+
+ # desired count is not required if scheduling strategy is daemon
+ if desired_count is not None:
+ params['desiredCount'] = desired_count
+ if capacity_provider_strategy:
+ params['capacityProviderStrategy'] = capacity_provider_strategy
+ if propagate_tags:
+ params['propagateTags'] = propagate_tags
+ if tags:
+ params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
+ if scheduling_strategy:
+ params['schedulingStrategy'] = scheduling_strategy
+ if enable_execute_command:
+ params["enableExecuteCommand"] = enable_execute_command
+
+ response = self.ecs.create_service(**params)
+ return self.jsonize(response['service'])
+
+ def update_service(
+ self,
+ service_name,
+ cluster_name,
+ task_definition,
+ desired_count,
+ deployment_configuration,
+ placement_constraints,
+ placement_strategy,
+ network_configuration,
+ health_check_grace_period_seconds,
+ force_new_deployment,
+ capacity_provider_strategy,
+ load_balancers,
+ purge_placement_constraints,
+ purge_placement_strategy,
+ enable_execute_command,
+ ):
+ params = dict(
+ cluster=cluster_name,
+ service=service_name,
+ taskDefinition=task_definition,
+ deploymentConfiguration=deployment_configuration)
+ # filter placement_constraint and left only those where value is not None
+ # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
+ if placement_constraints:
+ params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
+ for constraint in placement_constraints]
+
+ if purge_placement_constraints and not placement_constraints:
+ params['placementConstraints'] = []
+
+ if placement_strategy:
+ params['placementStrategy'] = placement_strategy
+
+ if purge_placement_strategy and not placement_strategy:
+ params['placementStrategy'] = []
+
+ if network_configuration:
+ params['networkConfiguration'] = network_configuration
+ if force_new_deployment:
+ params['forceNewDeployment'] = force_new_deployment
+ if capacity_provider_strategy:
+ params['capacityProviderStrategy'] = capacity_provider_strategy
+ if health_check_grace_period_seconds is not None:
+ params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ # desired count is not required if scheduling strategy is daemon
+ if desired_count is not None:
+ params['desiredCount'] = desired_count
+ if enable_execute_command is not None:
+ params["enableExecuteCommand"] = enable_execute_command
+
+ if load_balancers:
+ params['loadBalancers'] = load_balancers
+
+ response = self.ecs.update_service(**params)
+
+ return self.jsonize(response['service'])
+
+ def jsonize(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'createdAt' in service:
+ service['createdAt'] = str(service['createdAt'])
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+ def delete_service(self, service, cluster=None, force=False):
+ return self.ecs.delete_service(cluster=cluster, service=service, force=force)
+
+ def health_check_setable(self, params):
+ load_balancers = params.get('loadBalancers', [])
+ return len(load_balancers) > 0
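+
+ # The grace period is only settable when the service is attached to at
+ # least one load balancer, hence the length check above.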
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'deleting']),
+ name=dict(required=True, type='str', aliases=['service']),
+ cluster=dict(required=False, type='str', default='default'),
+ task_definition=dict(required=False, type='str'),
+ load_balancers=dict(required=False, default=[], type='list', elements='dict'),
+ desired_count=dict(required=False, type='int'),
+ client_token=dict(required=False, default='', type='str', no_log=False),
+ role=dict(required=False, default='', type='str'),
+ delay=dict(required=False, type='int', default=10),
+ repeat=dict(required=False, type='int', default=10),
+ force_new_deployment=dict(required=False, default=False, type='bool'),
+ force_deletion=dict(required=False, default=False, type='bool'),
+ deployment_controller=dict(required=False, default={}, type='dict'),
+ deployment_configuration=dict(required=False, default={}, type='dict'),
+ wait=dict(required=False, default=False, type='bool'),
+ placement_constraints=dict(
+ required=False,
+ default=[],
+ type='list',
+ elements='dict',
+ options=dict(
+ type=dict(type='str'),
+ expression=dict(required=False, type='str')
+ )
+ ),
+ purge_placement_constraints=dict(required=False, default=False, type='bool'),
+ placement_strategy=dict(
+ required=False,
+ default=[],
+ type='list',
+ elements='dict',
+ options=dict(
+ type=dict(type='str'),
+ field=dict(type='str'),
+ )
+ ),
+ purge_placement_strategy=dict(required=False, default=False, type='bool'),
+ health_check_grace_period_seconds=dict(required=False, type='int'),
+ network_configuration=dict(required=False, type='dict', options=dict(
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ assign_public_ip=dict(type='bool')
+ )),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ platform_version=dict(required=False, type='str'),
+ service_registries=dict(required=False, type='list', default=[], elements='dict'),
+ scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']),
+ capacity_provider_strategy=dict(
+ required=False,
+ type='list',
+ default=[],
+ elements='dict',
+ options=dict(
+ capacity_provider=dict(type='str'),
+ weight=dict(type='int'),
+ base=dict(type='int')
+ )
+ ),
+ propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]),
+ tags=dict(required=False, type="dict"),
+ enable_execute_command=dict(required=False, type="bool"),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('launch_type', 'FARGATE', ['network_configuration'])],
+ required_together=[['load_balancers', 'role']],
+ mutually_exclusive=[['launch_type', 'capacity_provider_strategy']])
+
+ if module.params['state'] == 'present':
+ if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None:
+ module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')
+ if module.params['task_definition'] is None and not module.params['force_new_deployment']:
+ module.fail_json(msg='Either task_definition or force_new_deployment is required when state is present.')
+
+ if len(module.params['capacity_provider_strategy']) > 6:
+ module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.')
+
+ service_mgr = EcsServiceManager(module)
+ if module.params['network_configuration']:
+ network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
+ else:
+ network_configuration = None
+
+ deployment_controller = map_complex_type(module.params['deployment_controller'],
+ DEPLOYMENT_CONTROLLER_TYPE_MAP)
+
+ deploymentController = snake_dict_to_camel_dict(deployment_controller)
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
+ serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))
+ capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy']))
+
+ try:
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ except Exception as e:
+ module.fail_json_aws(e,
+ msg="Exception describing service '{0}' in cluster '{1}'"
+ .format(module.params['name'], module.params['cluster']))
+
+ results = dict(changed=False)
+
+ if module.params['state'] == 'present':
+
+ matching = False
+ update = False
+
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
+ if module.params['force_new_deployment']:
+ update = True
+ elif service_mgr.is_matching_service(module.params, existing):
+ matching = True
+ results['service'] = existing
+ else:
+ update = True
+
+ if not matching:
+ if not module.check_mode:
+
+ role = module.params['role']
+ clientToken = module.params['client_token']
+
+ loadBalancers = []
+ for loadBalancer in module.params['load_balancers']:
+ if 'containerPort' in loadBalancer:
+ loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+ loadBalancers.append(loadBalancer)
+
+ if update:
+ # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature
+
+ if module.params['scheduling_strategy']:
+ if (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
+ module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
+
+ if module.params['service_registries']:
+ if (existing['serviceRegistries'] or []) != serviceRegistries:
+ module.fail_json(msg="It is not possible to update the service registries of an existing service")
+ if module.params['capacity_provider_strategy']:
+ if 'launchType' in existing.keys():
+ module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.")
+ if module.params['launch_type']:
+ if 'capacityProviderStrategy' in existing.keys():
+ module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.")
+ if (existing['loadBalancers'] or []) != loadBalancers:
+ # fails if deployment type is not CODE_DEPLOY or ECS
+ if existing.get('deploymentController', {}).get('type', 'ECS') not in ['CODE_DEPLOY', 'ECS']:
+ module.fail_json(msg="It is not possible to update the load balancers of an existing service")
+
+ if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY':
+ task_definition = ''
+ network_configuration = []
+ else:
+ task_definition = module.params['task_definition']
+
+ if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']:
+ module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service")
+
+ if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']:
+ module.fail_json(msg="It is not currently supported to change tags of an existing service")
+
+ updatedLoadBalancers = loadBalancers if existing.get('deploymentController', {}).get('type', 'ECS') == 'ECS' else []
+
+ if task_definition is None and module.params['force_new_deployment']:
+ task_definition = existing['taskDefinition']
+
+ try:
+ # update required
+ response = service_mgr.update_service(
+ module.params["name"],
+ module.params["cluster"],
+ task_definition,
+ module.params["desired_count"],
+ deploymentConfiguration,
+ module.params["placement_constraints"],
+ module.params["placement_strategy"],
+ network_configuration,
+ module.params["health_check_grace_period_seconds"],
+ module.params["force_new_deployment"],
+ capacityProviders,
+ updatedLoadBalancers,
+ module.params["purge_placement_constraints"],
+ module.params["purge_placement_strategy"],
+ module.params["enable_execute_command"],
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create service")
+
+ else:
+ try:
+ response = service_mgr.create_service(
+ module.params["name"],
+ module.params["cluster"],
+ module.params["task_definition"],
+ loadBalancers,
+ module.params["desired_count"],
+ clientToken,
+ role,
+ deploymentController,
+ deploymentConfiguration,
+ module.params["placement_constraints"],
+ module.params["placement_strategy"],
+ module.params["health_check_grace_period_seconds"],
+ network_configuration,
+ serviceRegistries,
+ module.params["launch_type"],
+ module.params["platform_version"],
+ module.params["scheduling_strategy"],
+ capacityProviders,
+ module.params["tags"],
+ module.params["propagate_tags"],
+ module.params["enable_execute_command"],
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create service")
+
+ if response.get('tags', None):
+ response['tags'] = boto3_tag_list_to_ansible_dict(response['tags'])
+ results['service'] = response
+
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ if not existing:
+ pass
+ else:
+            # The service exists, so delete it, mark the result changed, and
+            # return info about the deleted service.
+ del existing['deployments']
+ del existing['events']
+ results['ansible_facts'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ try:
+ service_mgr.delete_service(
+ module.params['name'],
+ module.params['cluster'],
+ module.params['force_deletion'],
+ )
+
+ # Wait for service to be INACTIVE prior to exiting
+ if module.params['wait']:
+ waiter = service_mgr.ecs.get_waiter('services_inactive')
+ try:
+ waiter.wait(
+ services=[module.params['name']],
+ cluster=module.params['cluster'],
+ WaiterConfig={
+ 'Delay': module.params['delay'],
+ 'MaxAttempts': module.params['repeat']
+ }
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, 'Timeout waiting for service removal')
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Couldn't delete service")
+
+ results['changed'] = True
+
+ elif module.params['state'] == 'deleting':
+ if not existing:
+ module.fail_json(msg="Service '" + module.params['name'] + " not found.")
+ return
+        # The service exists, so poll describe_service until it reports
+        # INACTIVE and mark the result changed.
+ delay = module.params['delay']
+ repeat = module.params['repeat']
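+        # For example, with delay=10 and repeat=6 the code below sleeps ten
+        # seconds up front and then polls describe_service up to six times,
+        # giving up after roughly seventy seconds in total.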
+ time.sleep(delay)
+ for i in range(repeat):
+ existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ status = existing['status']
+ if status == "INACTIVE":
+ results['changed'] = True
+ break
+ time.sleep(delay)
+            if i == repeat - 1:
+ module.fail_json(
+ msg="Service still not deleted after {0} tries of {1} seconds each."
+ .format(repeat, delay)
+ )
+ return
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service_info.py b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
new file mode 100644
index 000000000..f174a31cd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_service_info
+version_added: 1.0.0
+short_description: List or describe services in ECS
+description:
+ - Lists or describes services in ECS.
+author:
+ - "Mark Chance (@Java1Guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+options:
+ details:
+ description:
+ - Set this to true if you want detailed information about the services.
+ required: false
+ default: false
+ type: bool
+ events:
+ description:
+ - Whether to return ECS service events. Only has an effect if I(details=true).
+ required: false
+ default: true
+ type: bool
+ cluster:
+ description:
+      - The cluster ARN in which to list the services.
+ required: false
+ type: str
+ service:
+ description:
+      - One or more services to get details for.
+ required: false
+ type: list
+ elements: str
+ aliases: ['name']
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ service: console-test-service
+ details: true
+ register: output
+
+# Basic listing example
+- community.aws.ecs_service_info:
+ cluster: test-cluster
+ register: output
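+
+# A sketch combining the options above: fetch full details but omit the often
+# noisy service events (cluster and service names are placeholders)
+- community.aws.ecs_service_info:
+    cluster: test-cluster
+    service: console-test-service
+    details: true
+    events: false
+  register: output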
+'''
+
+RETURN = r'''
+services:
+ description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: str
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+      description: A list of load balancer objects.
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+          description: The name of the load balancer.
+ returned: always
+ type: str
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: str
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description:
+ - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the
+ service, the AWS account ID of the service owner, the service namespace, and then the service name.
+ sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service'
+ returned: always
+ type: str
+ serviceName:
+      description: A user-generated string used to identify the service.
+ returned: always
+ type: str
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: str
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: str
+ deployments:
+      description: A list of service deployments.
+ returned: always
+ type: list
+ elements: dict
+ events:
+      description: A list of service events.
+ returned: when events is true
+ type: list
+ elements: dict
+''' # NOQA
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class EcsServiceManager:
+ """Handles ECS Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+
+ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+ def list_services_with_backoff(self, **kwargs):
+ paginator = self.ecs.get_paginator('list_services')
+ try:
+ return paginator.paginate(**kwargs).build_full_result()
+ except is_boto3_error_code('ClusterNotFoundException') as e:
+ self.module.fail_json_aws(e, "Could not find cluster to list services")
+
+ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+ def describe_services_with_backoff(self, **kwargs):
+ return self.ecs.describe_services(**kwargs)
+
+ def list_services(self, cluster):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ try:
+ response = self.list_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list ECS services")
+ relevant_response = dict(services=response['serviceArns'])
+ return relevant_response
+
+ def describe_services(self, cluster, services):
+ fn_args = dict()
+        if cluster:
+ fn_args['cluster'] = cluster
+ fn_args['services'] = services
+ try:
+ response = self.describe_services_with_backoff(**fn_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
+ running_services = [self.extract_service_from(service) for service in response.get('services', [])]
+ services_not_running = response.get('failures', [])
+ return running_services, services_not_running
+
+ def extract_service_from(self, service):
+ # some fields are datetime which is not JSON serializable
+ # make them strings
+ if 'deployments' in service:
+ for d in service['deployments']:
+ if 'createdAt' in d:
+ d['createdAt'] = str(d['createdAt'])
+ if 'updatedAt' in d:
+ d['updatedAt'] = str(d['updatedAt'])
+ if 'events' in service:
+ if not self.module.params['events']:
+ del service['events']
+ else:
+ for e in service['events']:
+ if 'createdAt' in e:
+ e['createdAt'] = str(e['createdAt'])
+ return service
+
+
+def chunks(l, n):
+ """Yield successive n-sized chunks from l."""
+ """ https://stackoverflow.com/a/312464 """
+ for i in range(0, len(l), n):
+ yield l[i:i + n]
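+# For example, chunks(['a', 'b', 'c'], 2) yields ['a', 'b'] and then ['c'];
+# main() below passes n=10 because DescribeServices accepts at most ten
+# services per call.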
+
+
+def main():
+
+ argument_spec = dict(
+ details=dict(type='bool', default=False),
+ events=dict(type='bool', default=True),
+ cluster=dict(),
+ service=dict(type='list', elements='str', aliases=['name'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ show_details = module.params.get('details')
+
+ task_mgr = EcsServiceManager(module)
+ if show_details:
+ if module.params['service']:
+ services = module.params['service']
+ else:
+ services = task_mgr.list_services(module.params['cluster'])['services']
+ ecs_info = dict(services=[], services_not_running=[])
+ for chunk in chunks(services, 10):
+ running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
+ ecs_info['services'].extend(running_services)
+ ecs_info['services_not_running'].extend(services_not_running)
+ else:
+ ecs_info = task_mgr.list_services(module.params['cluster'])
+
+ module.exit_json(changed=False, **ecs_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_tag.py b/ansible_collections/community/aws/plugins/modules/ecs_tag.py
new file mode 100644
index 000000000..8698a7bbd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_tag.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Michael Pechner <mikey@mikey.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_tag
+version_added: 1.0.0
+short_description: Create and remove tags on Amazon ECS resources
+description:
+ - Creates and removes tags for Amazon ECS resources.
+ - Resources are referenced by their cluster name.
+author:
+ - Michael Pechner (@mpechner)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster whose resources we are tagging.
+ required: true
+ type: str
+ resource:
+ description:
+ - The ECS resource name.
+ - Required unless I(resource_type=cluster).
+ type: str
+ resource_type:
+ description:
+ - The type of resource.
+ default: cluster
+ choices: ['cluster', 'task', 'service', 'task_definition', 'container']
+ type: str
+ state:
+ description:
+ - Whether the tags should be present or absent on the resource.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+ - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value.
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+ - Whether unspecified tags should be removed from the resource.
+ - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure tags are present on a resource
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ state: present
+ tags:
+ Name: ubervol
+ env: prod
+
+- name: Remove the Env tag
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Env:
+ state: absent
+
+- name: Remove the Env tag if it's currently 'development'
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Env: development
+ state: absent
+
+- name: Remove all tags except for Name from a cluster
+ community.aws.ecs_tag:
+ cluster_name: mycluster
+ resource_type: cluster
+ tags:
+ Name: foo
+ state: absent
+ purge_tags: true
+'''
+
+RETURN = r'''
+tags:
+  description: A dict containing the tags on the resource.
+  returned: always
+  type: dict
+added_tags:
+  description: A dict of tags that were added to the resource.
+  returned: when tags are added
+  type: dict
+removed_tags:
+  description: A dict of tags that were removed from the resource.
+  returned: when tags are removed
+  type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def get_tags(ecs, module, resource):
+ try:
+ return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+
+
+def get_arn(ecs, module, cluster_name, resource_type, resource):
+
+ try:
+ if resource_type == 'cluster':
+ description = ecs.describe_clusters(clusters=[resource])
+ resource_arn = description['clusters'][0]['clusterArn']
+ elif resource_type == 'task':
+ description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
+ resource_arn = description['tasks'][0]['taskArn']
+ elif resource_type == 'service':
+ description = ecs.describe_services(cluster=cluster_name, services=[resource])
+ resource_arn = description['services'][0]['serviceArn']
+ elif resource_type == 'task_definition':
+ description = ecs.describe_task_definition(taskDefinition=resource)
+ resource_arn = description['taskDefinition']['taskDefinitionArn']
+ elif resource_type == 'container':
+            description = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[resource])
+ resource_arn = description['containerInstances'][0]['containerInstanceArn']
+ except (IndexError, KeyError):
+ module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
+
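+    # For example (hypothetical names), resource_type='service' with
+    # resource='my-svc' resolves to an ARN such as
+    # 'arn:aws:ecs:us-east-1:123456789012:service/my-svc'.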
+ return resource_arn
+
+
+def main():
+ argument_spec = dict(
+ cluster_name=dict(required=True),
+ resource=dict(required=False),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
+ )
+ required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
+
+ resource_type = module.params['resource_type']
+ cluster_name = module.params['cluster_name']
+ if resource_type == 'cluster':
+ resource = cluster_name
+ else:
+ resource = module.params['resource']
+ tags = module.params['tags']
+ state = module.params['state']
+ purge_tags = module.params['purge_tags']
+
+ result = {'changed': False}
+
+ ecs = module.client('ecs')
+
+ resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)
+
+ current_tags = get_tags(ecs, module, resource_arn)
+
+ add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
+
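+    # For example (hypothetical tags): with current_tags={'Name': 'foo', 'Env': 'dev'}
+    # and tags={'Env': 'prod'}, compare_aws_tags returns add_tags={'Env': 'prod'},
+    # and remove=['Name'] only when purge_tags is true.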
+ remove_tags = {}
+ if state == 'absent':
+ for key in tags:
+ if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
+ remove_tags[key] = current_tags[key]
+
+ for key in remove:
+ remove_tags[key] = current_tags[key]
+
+ if remove_tags:
+ result['changed'] = True
+ result['removed_tags'] = remove_tags
+ if not module.check_mode:
+ try:
+ ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+
+ if state == 'present' and add_tags:
+ result['changed'] = True
+ result['added_tags'] = add_tags
+ current_tags.update(add_tags)
+ if not module.check_mode:
+ try:
+ tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
+ ecs.tag_resource(resourceArn=resource_arn, tags=tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+
+ result['tags'] = get_tags(ecs, module, resource_arn)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_task.py b/ansible_collections/community/aws/plugins/modules/ecs_task.py
new file mode 100644
index 000000000..54948ce21
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_task.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_task
+version_added: 1.0.0
+short_description: Run, start or stop a task in ECS
+description:
+ - Creates or deletes instances of task definitions.
+author:
+ - Mark Chance (@Java1Guy)
+options:
+ operation:
+ description:
+ - Which task operation to execute.
+ - When I(operation=run) I(task_definition) must be set.
+ - When I(operation=start) both I(task_definition) and I(container_instances) must be set.
+ - When I(operation=stop) both I(task_definition) and I(task) must be set.
+ required: True
+ choices: ['run', 'start', 'stop']
+ type: str
+ cluster:
+ description:
+ - The name of the cluster to run the task on.
+ - If not specified, the cluster name will be C(default).
+ required: False
+ type: str
+ default: 'default'
+ task_definition:
+ description:
+ - The task definition to start, run or stop.
+ required: False
+ type: str
+ overrides:
+ description:
+ - A dictionary of values to pass to the new instances.
+ required: False
+ type: dict
+ count:
+ description:
+ - How many new instances to start.
+ required: False
+ type: int
+ task:
+ description:
+ - The ARN of the task to stop.
+ required: False
+ type: str
+ container_instances:
+ description:
+ - The list of container instances on which to deploy the task.
+ required: False
+ type: list
+ elements: str
+ started_by:
+ description:
+ - A value showing who or what started the task (for informational purposes).
+ required: False
+ type: str
+ network_configuration:
+ description:
+ - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
+ type: dict
+ suboptions:
+ assign_public_ip:
+ description: Whether the task's elastic network interface receives a public IP address.
+ type: bool
+ version_added: 1.5.0
+ subnets:
+ description: A list of subnet IDs to which the task is attached.
+ type: list
+ elements: str
+ security_groups:
+ description: A list of group names or group IDs for the task.
+ type: list
+ elements: str
+ launch_type:
+ description:
+ - The launch type on which to run your service.
+ required: false
+ choices: ["EC2", "FARGATE"]
+ type: str
+ tags:
+ type: dict
+ description:
+      - Tags that will be added to ECS tasks on start and run.
+ required: false
+ aliases: ['resource_tags']
+ wait:
+ description:
+ - Whether or not to wait for the desired state.
+ type: bool
+ default: false
+ version_added: 4.1.0
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Simple example of run task
+- name: Run task
+ community.aws.ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ count: 1
+ started_by: ansible_user
+ register: task_output
+
+# Simple example of start task
+
+- name: Start a task
+ community.aws.ecs_task:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ tags:
+ resourceName: a_task_for_ansible_to_run
+ type: long_running_task
+ network: internal
+ version: 1.4
+ container_instances:
+ - arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
+ started_by: ansible_user
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+ register: task_output
+
+- name: RUN a task on Fargate
+ community.aws.ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ subnets:
+ - subnet-abcd1234
+ security_groups:
+ - sg-aaaa1111
+ - my_security_group
+ register: task_output
+
+- name: RUN a task on Fargate with public ip assigned
+ community.aws.ecs_task:
+ operation: run
+ count: 2
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ assign_public_ip: true
+ subnets:
+ - subnet-abcd1234
+ register: task_output
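+
+# A minimal sketch of the wait option: block until the started task reaches
+# the RUNNING state (cluster and task definition names are placeholders)
+- name: Run task and wait
+  community.aws.ecs_task:
+    operation: run
+    cluster: console-sample-app-static-cluster
+    task_definition: console-sample-app-static-taskdef
+    count: 1
+    wait: true
+  register: task_output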
+
+- name: Stop a task
+ community.aws.ecs_task:
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+'''
+RETURN = r'''
+task:
+ description: details about the task that was started
+ returned: success
+ type: complex
+ contains:
+ taskArn:
+ description: The Amazon Resource Name (ARN) that identifies the task.
+ returned: always
+ type: str
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
+ returned: only when details is true
+ type: str
+ taskDefinitionArn:
+ description: The Amazon Resource Name (ARN) of the task definition.
+ returned: only when details is true
+ type: str
+ containerInstanceArn:
+      description: The Amazon Resource Name (ARN) of the container instance that hosts the task.
+ returned: only when details is true
+ type: str
+ overrides:
+ description: The container overrides set for this task.
+ returned: only when details is true
+ type: list
+ elements: dict
+ lastStatus:
+ description: The last recorded status of the task.
+ returned: only when details is true
+ type: str
+ desiredStatus:
+ description: The desired status of the task.
+ returned: only when details is true
+ type: str
+ containers:
+ description: The container details.
+ returned: only when details is true
+ type: list
+ elements: dict
+ startedBy:
+      description: The user who started the task.
+ returned: only when details is true
+ type: str
+ stoppedReason:
+ description: The reason why the task was stopped.
+ returned: only when details is true
+ type: str
+ createdAt:
+ description: The timestamp of when the task was created.
+ returned: only when details is true
+ type: str
+ startedAt:
+ description: The timestamp of when the task was started.
+ returned: only when details is true
+ type: str
+ stoppedAt:
+ description: The timestamp of when the task was stopped.
+ returned: only when details is true
+ type: str
+ launchType:
+ description: The launch type on which to run your task.
+ returned: always
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+class EcsExecManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+ self.ecs = module.client('ecs')
+ self.ec2 = module.client('ec2')
+
+ def format_network_configuration(self, network_config):
+ result = dict()
+ if 'subnets' in network_config:
+ result['subnets'] = network_config['subnets']
+ else:
+ self.module.fail_json(msg="Network configuration must include subnets")
+ if 'security_groups' in network_config:
+ groups = network_config['security_groups']
+ if any(not sg.startswith('sg-') for sg in groups):
+ try:
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't look up security groups")
+ result['securityGroups'] = groups
+ if 'assign_public_ip' in network_config:
+ if network_config['assign_public_ip'] is True:
+ result['assignPublicIp'] = "ENABLED"
+ else:
+ result['assignPublicIp'] = "DISABLED"
+
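+        # A sketch of the overall transformation (all values hypothetical):
+        #   {'subnets': ['subnet-abcd1234'], 'security_groups': ['sg-aaaa1111'],
+        #    'assign_public_ip': True}
+        # becomes
+        #   {'awsvpcConfiguration': {'subnets': ['subnet-abcd1234'],
+        #                            'securityGroups': ['sg-aaaa1111'],
+        #                            'assignPublicIp': 'ENABLED'}}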
+ return dict(awsvpcConfiguration=result)
+
+ def list_tasks(self, cluster_name, service_name, status):
+ response = self.ecs.list_tasks(
+ cluster=cluster_name,
+ family=service_name,
+ desiredStatus=status
+ )
+        for task_arn in response['taskArns']:
+            if task_arn.endswith(service_name):
+                return task_arn
+        return None
+
+ def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
+ if overrides is None:
+ overrides = dict()
+ params = dict(cluster=cluster, taskDefinition=task_definition,
+ overrides=overrides, count=count, startedBy=startedBy)
+ if self.module.params['network_configuration']:
+ params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ if launch_type:
+ params['launchType'] = launch_type
+ if tags:
+ params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
+ # TODO: need to check if long arn format enabled.
+ try:
+ response = self.ecs.run_task(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't run task")
+ # include tasks and failures
+ return response['tasks']
+
+ def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
+ args = dict()
+ if cluster:
+ args['cluster'] = cluster
+ if task_definition:
+ args['taskDefinition'] = task_definition
+ if overrides:
+ args['overrides'] = overrides
+ if container_instances:
+ args['containerInstances'] = container_instances
+ if startedBy:
+ args['startedBy'] = startedBy
+ if self.module.params['network_configuration']:
+ args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ if tags:
+ args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ try:
+ response = self.ecs.start_task(**args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't start task")
+ # include tasks and failures
+ return response['tasks']
+
+ def stop_task(self, cluster, task):
+ response = self.ecs.stop_task(cluster=cluster, task=task)
+ return response['task']
+
+ def ecs_task_long_format_enabled(self):
+ account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
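+        # The response has the shape (hypothetical account):
+        #   {'settings': [{'name': 'taskLongArnFormat', 'value': 'enabled', ...}]}
+        # so support reduces to a string comparison on the first entry.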
+ return account_support['settings'][0]['value'] == 'enabled'
+
+
+def main():
+ argument_spec = dict(
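+        # The trailing R/S/P letters appear to mark which operations
+        # (Run / Start / stoP) use each option; * marks options the
+        # required_if rules below make mandatory for that operation.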
+ operation=dict(required=True, choices=['run', 'start', 'stop']),
+ cluster=dict(required=False, type='str', default='default'), # R S P
+ task_definition=dict(required=False, type='str'), # R* S*
+ overrides=dict(required=False, type='dict'), # R S
+ count=dict(required=False, type='int'), # R
+ task=dict(required=False, type='str'), # P*
+ container_instances=dict(required=False, type='list', elements='str'), # S*
+ started_by=dict(required=False, type='str'), # R S
+ network_configuration=dict(required=False, type='dict'),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ wait=dict(required=False, default=False, type='bool'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
+ required_if=[
+ ('launch_type', 'FARGATE', ['network_configuration']),
+ ('operation', 'run', ['task_definition']),
+ ('operation', 'start', [
+ 'task_definition',
+ 'container_instances'
+ ]),
+ ('operation', 'stop', ['task_definition', 'task']),
+ ])
+
+    # Determine which task to look up, and the status it should have, for the requested operation
+ if module.params['operation'] == 'run':
+ task_to_list = module.params['task_definition']
+ status_type = "RUNNING"
+
+ if module.params['operation'] == 'start':
+ task_to_list = module.params['task']
+ status_type = "RUNNING"
+
+ if module.params['operation'] == 'stop':
+ task_to_list = module.params['task_definition']
+ status_type = "STOPPED"
+
+ service_mgr = EcsExecManager(module)
+
+ if module.params['tags']:
+ if not service_mgr.ecs_task_long_format_enabled():
+ module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
+
+ existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+
+ results = dict(changed=False)
+ if module.params['operation'] == 'run':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task'] = existing
+ else:
+ if not module.check_mode:
+
+ # run_task returns a list of tasks created
+ tasks = service_mgr.run_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['count'],
+ module.params['started_by'],
+ module.params['launch_type'],
+ module.params['tags'],
+ )
+
+ # Wait for task(s) to be running prior to exiting
+ if module.params['wait']:
+
+ waiter = service_mgr.ecs.get_waiter('tasks_running')
+ try:
+ waiter.wait(
+ tasks=[task['taskArn'] for task in tasks],
+ cluster=module.params['cluster'],
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, 'Timeout waiting for tasks to run')
+
+ results['task'] = tasks
+
+ results['changed'] = True
+
+ elif module.params['operation'] == 'start':
+ if existing:
+ # TBD - validate the rest of the details
+ results['task'] = existing
+ else:
+ if not module.check_mode:
+ results['task'] = service_mgr.start_task(
+ module.params['cluster'],
+ module.params['task_definition'],
+ module.params['overrides'],
+ module.params['container_instances'],
+ module.params['started_by'],
+ module.params['tags'],
+ )
+
+ results['changed'] = True
+
+ elif module.params['operation'] == 'stop':
+ if existing:
+ results['task'] = existing
+ else:
+ if not module.check_mode:
+                # The task exists, so stop it and mark the result changed.
+ results['task'] = service_mgr.stop_task(
+ module.params['cluster'],
+ module.params['task']
+ )
+
+ # Wait for task to be stopped prior to exiting
+ if module.params['wait']:
+
+ waiter = service_mgr.ecs.get_waiter('tasks_stopped')
+ try:
+ waiter.wait(
+ tasks=[module.params['task']],
+ cluster=module.params['cluster'],
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, 'Timeout waiting for task to stop')
+
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
new file mode 100644
index 000000000..a8b5e97d8
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
@@ -0,0 +1,1153 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ecs_taskdefinition
+version_added: 1.0.0
+short_description: Register a task definition in ECS
+description:
+ - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS).
+author:
+ - Mark Chance (@Java1Guy)
+ - Alina Buzachis (@alinabuzachis)
+options:
+ state:
+ description:
+ - State whether the task definition should exist or be deleted.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ arn:
+ description:
+ - The ARN of the task description to delete.
+ required: false
+ type: str
+ family:
+ description:
+      - A name to give the task definition.
+ required: false
+ type: str
+ revision:
+ description:
+ - A revision number for the task definition.
+ required: False
+ type: int
+ force_create:
+ description:
+ - Always create new task definition.
+ required: False
+ type: bool
+ default: false
+ containers:
+ description:
+ - A list of containers definitions.
+ - See U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html) for a complete list of parameters.
+ required: True
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The name of a container.
+ required: False
+ type: str
+ image:
+ description: The image used to start a container.
+ required: False
+ type: str
+ repositoryCredentials:
+ description: The private repository authentication credentials to use.
+ required: False
+ type: dict
+ suboptions:
+ credentialsParameter:
+ description:
+ - The Amazon Resource Name (ARN) of the secret containing the private repository credentials.
+ required: True
+ type: str
+ cpu:
+ description: The number of cpu units reserved for the container.
+ required: False
+ type: int
+ memory:
+ description: The amount (in MiB) of memory to present to the container.
+ required: False
+ type: int
+ memoryReservation:
+ description: The soft limit (in MiB) of memory to reserve for the container.
+ required: False
+ type: int
+ links:
+ description:
+ - Allows containers to communicate with each other without the need for port mappings.
+ - This parameter is only supported if I(network_mode=bridge).
+ required: False
+ type: list
+ elements: str
+ portMappings:
+ description: The list of port mappings for the container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ containerPort:
+ description: The port number on the container that is bound to the user-specified or automatically assigned host port.
+ required: False
+ type: int
+ hostPort:
+ description: The port number on the container instance to reserve for your container.
+ required: False
+ type: int
+ protocol:
+ description: The protocol used for the port mapping.
+ required: False
+ type: str
+ default: tcp
+ choices: ['tcp', 'udp']
+ essential:
+ description:
+ - If I(essential=True), and the container fails or stops for any reason, all other containers that are part of the task are stopped.
+ required: False
+ type: bool
+ entryPoint:
+ description: The entry point that is passed to the container.
+ required: False
+        type: list
+        elements: str
+ command:
+        description: The command that is passed to the container. If there are multiple arguments, each argument is a separate string in the array.
+ required: False
+ type: list
+ elements: str
+ environment:
+ description: The environment variables to pass to a container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The name of the key-value pair.
+ required: False
+ type: str
+ value:
+ description: The value of the key-value pair.
+ required: False
+ type: str
+ environmentFiles:
+ description: A list of files containing the environment variables to pass to a container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ value:
+ description: The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.
+ required: False
+ type: str
+ type:
+ description: The file type to use. The only supported value is C(s3).
+ required: False
+ type: str
+ mountPoints:
+ description: The mount points for data volumes in your container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ sourceVolume:
+ description: The name of the volume to mount.
+ required: False
+ type: str
+ containerPath:
+ description: The path on the container to mount the host volume at.
+ required: False
+ type: str
+ readOnly:
+ description:
+ - If this value is C(True), the container has read-only access to the volume.
+ - If this value is C(False), then the container can write to the volume.
+ required: False
+ default: False
+ type: bool
+ volumesFrom:
+ description: Data volumes to mount from another container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ sourceContainer:
+ description:
+ - The name of another container within the same task definition from which to mount volumes.
+ required: False
+ type: str
+ readOnly:
+ description:
+ - If this value is C(True), the container has read-only access to the volume.
+ - If this value is C(False), then the container can write to the volume.
+ required: False
+ default: False
+ type: bool
+ linuxParameters:
+ description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities.
+ required: False
+ type: dict
+ suboptions:
+ capabilities:
+ description:
+ - The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.
+ required: False
+ type: dict
+ suboptions:
+ add:
+ description:
+ - The Linux capabilities for the container that have been added to the default configuration provided by Docker.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: list
+ choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER",
+ "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD",
+ "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID",
+ "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO",
+ "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"]
+ elements: str
+ drop:
+ description:
+ - The Linux capabilities for the container that have been removed from the default configuration provided by Docker.
+ required: False
+ type: list
+ choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER",
+ "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD",
+ "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID",
+ "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO",
+ "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"]
+ elements: str
+ devices:
+ description:
+ - Any host devices to expose to the container.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ hostPath:
+ description: The path for the device on the host container instance.
+ required: True
+ type: str
+ containerPath:
+ description: The path inside the container at which to expose the host device.
+ required: False
+ type: str
+ permissions:
+ description: The explicit permissions to provide to the container for the device.
+ required: False
+ type: list
+ elements: str
+ initProcessEnabled:
+ description: Run an init process inside the container that forwards signals and reaps processes.
+ required: False
+ type: bool
+ sharedMemorySize:
+ description:
+ - The value for the size (in MiB) of the /dev/shm volume.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: int
+ tmpfs:
+ description:
+ - The container path, mount options, and size (in MiB) of the tmpfs mount.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ containerPath:
+ description: The absolute file path where the tmpfs volume is to be mounted.
+ required: True
+ type: str
+ size:
+ description: The size (in MiB) of the tmpfs volume.
+ required: True
+ type: int
+ mountOptions:
+ description: The list of tmpfs volume mount options.
+ required: False
+ type: list
+ choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync",
+ "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable",
+ "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime",
+ "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"]
+ elements: str
+ maxSwap:
+ description:
+ - The total amount of swap memory (in MiB) a container can use.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: int
+ swappiness:
+ description:
+ - This allows you to tune a container's memory swappiness behavior.
+ - If I(launch_type=FARGATE), this parameter is not supported.
+ required: False
+ type: int
+ secrets:
+ description: The secrets to pass to the container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The value to set as the environment variable on the container.
+ required: True
+ type: str
+          valueFrom:
+ description: The secret to expose to the container.
+ required: True
+ type: str
+ dependsOn:
+ description:
+ - The dependencies defined for container startup and shutdown.
+ - When a dependency is defined for container startup, for container shutdown it is reversed.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ containerName:
+ description: The name of a container.
+ type: str
+ required: True
+ condition:
+ description: The dependency condition of the container.
+ type: str
+ required: True
+ choices: ["start", "complete", "success", "healthy"]
+ startTimeout:
+ description: Time duration (in seconds) to wait before giving up on resolving dependencies for a container.
+ required: False
+ type: int
+ stopTimeout:
+ description: Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
+ required: False
+ type: int
+ hostname:
+ description:
+ - The hostname to use for your container.
+ - This parameter is not supported if I(network_mode=awsvpc).
+ required: False
+ type: str
+ user:
+ description:
+ - The user to use inside the container.
+ - This parameter is not supported for Windows containers.
+ required: False
+ type: str
+ workingDirectory:
+ description: The working directory in which to run commands inside the container.
+ required: False
+ type: str
+ disableNetworking:
+ description: When this parameter is C(True), networking is disabled within the container.
+ required: False
+ type: bool
+ privileged:
+ description: When this parameter is C(True), the container is given elevated privileges on the host container instance.
+ required: False
+ type: bool
+ readonlyRootFilesystem:
+ description: When this parameter is C(True), the container is given read-only access to its root file system.
+ required: false
+ type: bool
+ dnsServers:
+ description:
+ - A list of DNS servers that are presented to the container.
+ - This parameter is not supported for Windows containers.
+ required: False
+ type: list
+ elements: str
+ dnsSearchDomains:
+ description:
+ - A list of DNS search domains that are presented to the container.
+ - This parameter is not supported for Windows containers.
+ required: False
+ type: list
+ elements: str
+ extraHosts:
+ description:
+ - A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
+ - This parameter is not supported for Windows containers or tasks that use I(network_mode=awsvpc).
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ hostname:
+ description: The hostname to use in the /etc/hosts entry.
+ type: str
+ required: False
+ ipAddress:
+ description: The IP address to use in the /etc/hosts entry.
+ type: str
+ required: False
+ dockerSecurityOptions:
+ description:
+ - A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
+ - This parameter is not supported for Windows containers.
+ required: False
+ type: list
+ elements: str
+ interactive:
+ description:
+        - When I(interactive=True), it allows deploying containerized applications that require stdin or a tty to be allocated.
+ required: False
+ type: bool
+ pseudoTerminal:
+ description: When this parameter is C(True), a TTY is allocated.
+ required: False
+ type: bool
+ dockerLabels:
+ description: A key/value map of labels to add to the container.
+ required: False
+ type: dict
+ ulimits:
+ description:
+ - A list of ulimits to set in the container.
+ - This parameter is not supported for Windows containers.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The type of the ulimit.
+ type: str
+ required: False
+ choices: ['core', 'cpu', 'data', 'fsize', 'locks', 'memlock', 'msgqueue', 'nice', 'nofile', 'nproc', 'rss',
+ 'rtprio', 'rttime', 'sigpending', 'stack']
+ softLimit:
+ description: The soft limit for the ulimit type.
+ type: int
+ required: False
+ hardLimit:
+ description: The hard limit for the ulimit type.
+ type: int
+ required: False
+ logConfiguration:
+ description: The log configuration specification for the container.
+ required: False
+ type: dict
+ suboptions:
+ logDriver:
+ description:
+ - The log driver to use for the container.
+ - For tasks on AWS Fargate, the supported log drivers are C(awslogs), C(splunk), and C(awsfirelens).
+ - For tasks hosted on Amazon EC2 instances, the supported log drivers are C(awslogs), C(fluentd),
+ C(gelf), C(json-file), C(journald), C(logentries), C(syslog), C(splunk), and C(awsfirelens).
+ type: str
+ required: False
+ options:
+ description: The configuration options to send to the log driver.
+ required: False
+          type: dict
+ secretOptions:
+ description: The secrets to pass to the log configuration.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The name of the secret.
+ type: str
+ required: False
+ valueFrom:
+ description: The secret to expose to the container.
+ type: str
+ required: False
+ healthCheck:
+ description: The health check command and associated configuration parameters for the container.
+ required: False
+ type: dict
+ suboptions:
+ command:
+ description:
+ - A string array representing the command that the container runs to determine if it is healthy.
+ - >
+ The string array must start with CMD to run the command arguments directly,
+ or CMD-SHELL to run the command with the container's default shell.
+ - An exit code of 0 indicates success, and non-zero exit code indicates failure.
+ required: False
+ type: list
+ elements: str
+ interval:
+ description:
+ - The time period in seconds between each health check execution.
+ - You may specify between 5 and 300 seconds. The default value is 30 seconds.
+ required: False
+ type: int
+ default: 30
+ retries:
+ description:
+ - The number of times to retry a failed health check before the container is considered unhealthy.
+ - You may specify between 1 and 10 retries. The default value is 3.
+ required: False
+ type: int
+ default: 3
+ startPeriod:
+ description:
+ - >
+ The optional grace period to provide containers time to bootstrap
+ before failed health checks count towards the maximum number of retries.
+ - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled.
+ - >
+ Note: If a health check succeeds within the startPeriod,
+ then the container is considered healthy and any subsequent failures count toward the maximum number of retries.
+ required: False
+ type: int
+ timeout:
+ description:
+ - The time period in seconds to wait for a health check to succeed before it is considered a failure.
+ - You may specify between 2 and 60 seconds. The default value is 5.
+ required: False
+ type: int
+ default: 5
+ systemControls:
+ description: A list of namespaced kernel parameters to set in the container.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ namespace:
+ description: The namespaced kernel parameter to set a C(value) for.
+ type: str
+ value:
+ description: The value for the namespaced kernel parameter that's specified in C(namespace).
+ type: str
+ resourceRequirements:
+ description:
+ - The type and amount of a resource to assign to a container.
+ - The only supported resources are C(GPU) and C(InferenceAccelerator).
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ value:
+ description: The value for the specified resource type.
+ type: str
+ type:
+ description: The type of resource to assign to a container.
+ type: str
+ choices: ['GPU', 'InferenceAccelerator']
+ firelensConfiguration:
+ description:
+ - The FireLens configuration for the container.
+ - This is used to specify and configure a log router for container logs.
+ required: False
+ type: dict
+ suboptions:
+ type:
+ description:
+ - The log router to use. The valid values are C(fluentd) or C(fluentbit).
+ required: False
+ type: str
+ choices:
+ - fluentd
+ - fluentbit
+ options:
+ description:
+ - The options to use when configuring the log router.
+ - This field is optional and can be used to specify a custom configuration
+ file or to add additional metadata, such as the task, task definition, cluster,
+ and container instance details to the log event.
+ - If specified, the syntax to use is
+ C({"enable-ecs-log-metadata":"true|false","config-file-type:"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}).
+ - For more information, see U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html#firelens-taskdef).
+ required: False
+ type: dict
+ network_mode:
+ description:
+ - The Docker networking mode to use for the containers in the task.
+ - Windows containers must use I(network_mode=default), which will utilize docker NAT networking.
+ - Setting I(network_mode=default) for a Linux container will use C(bridge) mode.
+ required: false
+ default: bridge
+ choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ]
+ type: str
+ task_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
+ the permissions that are specified in this role.
+ required: false
+ type: str
+ default: ''
+ execution_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.
+ required: false
+ type: str
+ default: ''
+ volumes:
+ description:
+ - A list of names of volumes to be attached.
+ required: False
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description: The name of the volume.
+ required: true
+ launch_type:
+ description:
+ - The launch type on which to run your task.
+ required: false
+ type: str
+ choices: ["EC2", "FARGATE"]
+ cpu:
+ description:
+ - The number of cpu units used by the task. If I(launch_type=EC2), this field is optional and any value can be used.
+ - If I(launch_type=FARGATE), this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096).
+ required: false
+ type: str
+ memory:
+ description:
+ - The amount (in MiB) of memory used by the task. If I(launch_type=EC2), this field is optional and any value can be used.
+ - If I(launch_type=FARGATE), this field is required and is limited by the CPU.
+ required: false
+ type: str
+ placement_constraints:
+ version_added: 2.1.0
+ description:
+ - Placement constraint objects to use for the task.
+ - You can specify a maximum of 10 constraints per task.
+ - Task placement constraints are not supported for tasks run on Fargate.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ description: The type of constraint.
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ containers:
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ logConfiguration:
+ logDriver: awslogs
+ options:
+ awslogs-group: /ecs/test-cluster-taskdef
+ awslogs-region: us-west-2
+ awslogs-stream-prefix: ecs
+ - name: busybox
+ command:
+ - >
+ /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
+ </h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
+ cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
+ volumes:
+ - name: my-vol
+ family: test-cluster-taskdef
+ state: present
+ register: task_output
+
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ state: present
+
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1024
+ state: present
+ network_mode: awsvpc
+
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ dependsOn:
+ - containerName: "simple-app"
+ condition: "start"
+
+# Create Task Definition with Environment Variables and Secrets
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ environment:
+ - name: "PORT"
+ value: "8080"
+ secrets:
+ # For variables stored in Secrets Manager
+ - name: "NGINX_HOST"
+ valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
+ # For variables stored in Parameter Store
+ - name: "API_KEY"
+ valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
+ launch_type: FARGATE
+ cpu: 512
+ memory: 1GB
+ state: present
+ network_mode: awsvpc
+
+# Create Task Definition with health check
+- name: Create task definition
+ community.aws.ecs_taskdefinition:
+ family: nginx
+ containers:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ healthCheck:
+ command:
+ - CMD-SHELL
+ - /app/healthcheck.py
+ interval: 60
+ retries: 3
+ startPeriod: 15
+ timeout: 15
+ state: present
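+
+# A sketch of the placement_constraints option documented above; the memberOf
+# type and the expression value are illustrative assumptions only.
+- name: Create task definition with a placement constraint
+  community.aws.ecs_taskdefinition:
+    family: nginx
+    containers:
+    - name: nginx
+      essential: true
+      image: "nginx"
+    launch_type: EC2
+    placement_constraints:
+      - type: memberOf
+        expression: "attribute:ecs.instance-type == t2.micro"
+    state: present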
+'''
+RETURN = r'''
+taskdefinition:
+ description: a reflection of the input parameters
+ type: dict
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+from ansible.module_utils._text import to_text
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class EcsTaskManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+
+ self.ecs = module.client('ecs', AWSRetry.jittered_backoff())
+
+ def describe_task(self, task_name):
+ try:
+ response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name)
+ return response['taskDefinition']
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+            return None
+
+ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions,
+ volumes, launch_type, cpu, memory, placement_constraints):
+ validated_containers = []
+
+        # Ensure numeric parameters are ints, as required by the AWS SDK
+ for container in container_definitions:
+ for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'):
+ if param in container:
+ container[param] = int(container[param])
+
+ if 'portMappings' in container:
+ for port_mapping in container['portMappings']:
+ for port in ('hostPort', 'containerPort'):
+ if port in port_mapping:
+ port_mapping[port] = int(port_mapping[port])
+ if network_mode == 'awsvpc' and 'hostPort' in port_mapping:
+ if port_mapping['hostPort'] != port_mapping.get('containerPort'):
+ self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as "
+ "container port or not be set")
+
+ if 'linuxParameters' in container:
+ for linux_param in container.get('linuxParameters'):
+ if linux_param == 'tmpfs':
+ for tmpfs_param in container['linuxParameters']['tmpfs']:
+ if 'size' in tmpfs_param:
+ tmpfs_param['size'] = int(tmpfs_param['size'])
+
+ for param in ('maxSwap', 'swappiness', 'sharedMemorySize'):
+ if param in linux_param:
+ container['linuxParameters'][param] = int(container['linuxParameters'][param])
+
+ if 'ulimits' in container:
+ for limits_mapping in container['ulimits']:
+ for limit in ('softLimit', 'hardLimit'):
+ if limit in limits_mapping:
+ limits_mapping[limit] = int(limits_mapping[limit])
+
+ validated_containers.append(container)
+
+ params = dict(
+ family=family,
+ taskRoleArn=task_role_arn,
+            containerDefinitions=validated_containers,
+ volumes=volumes
+ )
+ if network_mode != 'default':
+ params['networkMode'] = network_mode
+ if cpu:
+ params['cpu'] = cpu
+ if memory:
+ params['memory'] = memory
+ if launch_type:
+ params['requiresCompatibilities'] = [launch_type]
+ if execution_role_arn:
+ params['executionRoleArn'] = execution_role_arn
+ if placement_constraints:
+ params['placementConstraints'] = placement_constraints
+
+ try:
+ response = self.ecs.register_task_definition(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Failed to register task")
+
+ return response['taskDefinition']
+
+ def describe_task_definitions(self, family):
+ data = {
+ "taskDefinitionArns": [],
+ "nextToken": None
+ }
+
+ def fetch():
+ # Boto3 is weird about params passed, so only pass nextToken if we have a value
+ params = {
+ 'familyPrefix': family
+ }
+
+ if data['nextToken']:
+ params['nextToken'] = data['nextToken']
+
+ result = self.ecs.list_task_definitions(**params)
+ data['taskDefinitionArns'] += result['taskDefinitionArns']
+ data['nextToken'] = result.get('nextToken', None)
+ return data['nextToken'] is not None
+
+ # Fetch all the arns, possibly across multiple pages
+ while fetch():
+ pass
+
+ # Return the full descriptions of the task definitions, sorted ascending by revision
+ return list(
+ sorted(
+ [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
+ key=lambda td: td['revision']
+ )
+ )
+
+ def deregister_task(self, taskArn):
+ response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
+ return response['taskDefinition']
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ arn=dict(required=False, type='str'),
+ family=dict(required=False, type='str'),
+ revision=dict(required=False, type='int'),
+ force_create=dict(required=False, default=False, type='bool'),
+ containers=dict(required=True, type='list', elements='dict'),
+ network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
+ task_role_arn=dict(required=False, default='', type='str'),
+ execution_role_arn=dict(required=False, default='', type='str'),
+ volumes=dict(required=False, type='list', elements='dict'),
+ launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ cpu=dict(),
+ memory=dict(required=False, type='str'),
+ placement_constraints=dict(required=False, type='list', elements='dict',
+ options=dict(type=dict(type='str'), expression=dict(type='str'))),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
+ )
+
+ task_to_describe = None
+ task_mgr = EcsTaskManager(module)
+ results = dict(changed=False)
+
+ if module.params['state'] == 'present':
+ if 'containers' not in module.params or not module.params['containers']:
+ module.fail_json(msg="To use task definitions, a list of containers must be specified")
+
+ if 'family' not in module.params or not module.params['family']:
+ module.fail_json(msg="To use task definitions, a family must be specified")
+
+ network_mode = module.params['network_mode']
+ launch_type = module.params['launch_type']
+ placement_constraints = module.params['placement_constraints']
+ if launch_type == 'FARGATE':
+ if network_mode != 'awsvpc':
+ module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
+ if placement_constraints:
+ module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate")
+
+ for container in module.params['containers']:
+ if container.get('links') and network_mode == 'awsvpc':
+ module.fail_json(msg='links parameter is not supported if network mode is awsvpc.')
+
+            for environment in container.get('environment', []):
+                # The ECS API requires environment variable values to be strings
+                environment['value'] = to_text(environment['value'])
+
+ for environment_file in container.get('environmentFiles', []):
+ if environment_file['type'] != 's3':
+ module.fail_json(msg='The only supported value for environmentFiles is s3.')
+
+ for linux_param in container.get('linuxParameters', {}):
+                if linux_param == 'devices' and launch_type == 'FARGATE':
+                    module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.')
+
+ if linux_param == 'maxSwap' and launch_type == 'FARGATE':
+ module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.')
+ elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0:
+ module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.')
+
+ if (
+ linux_param == 'swappiness' and
+ (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100)
+ ):
+ module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.')
+
+ if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE':
+ module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.')
+
+ if linux_param == 'tmpfs' and launch_type == 'FARGATE':
+ module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.')
+
+ if container.get('hostname') and network_mode == 'awsvpc':
+ module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.')
+
+ if container.get('extraHosts') and network_mode == 'awsvpc':
+ module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.')
+
+ family = module.params['family']
+ existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
+
+ if 'revision' in module.params and module.params['revision']:
+ # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
+ revision = int(module.params['revision'])
+
+ # A revision has been explicitly specified. Attempt to locate a matching revision
+ tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
+ existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
+
+ if existing and existing['status'] != "ACTIVE":
+ # We cannot reactivate an inactive revision
+ module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
+ elif not existing:
+ if not existing_definitions_in_family and revision != 1:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
+ elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
+ (revision, existing_definitions_in_family[-1]['revision'] + 1))
+ else:
+ existing = None
+
+ def _right_has_values_of_left(left, right):
+ # Make sure the values are equivalent for everything left has
+ for k, v in left.items():
+ if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
+ # We don't care about list ordering because ECS can change things
+ if isinstance(v, list) and k in right:
+ left_list = v
+ right_list = right[k] or []
+
+ if len(left_list) != len(right_list):
+ return False
+
+ for list_val in left_list:
+ if list_val not in right_list:
+ # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp')
+ # fill in that default if absent and see if it is in right_list then
+ if isinstance(list_val, dict) and not list_val.get('protocol'):
+ modified_list_val = dict(list_val)
+ modified_list_val.update(protocol='tcp')
+ if modified_list_val in right_list:
+ continue
+ else:
+ return False
+
+ # Make sure right doesn't have anything that left doesn't
+ for k, v in right.items():
+ if v and k not in left:
+ # 'essential' defaults to True when not specified
+ if k == 'essential' and v is True:
+ pass
+ else:
+ return False
+
+ return True
+
+ def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition):
+            if existing_task_definition['status'] != "ACTIVE":
+                return None
+
+            if requested_task_role_arn != existing_task_definition.get('taskRoleArn', ""):
+                return None
+
+            if requested_launch_type is not None and requested_launch_type not in existing_task_definition.get('requiresCompatibilities', []):
+                return None
+
+            existing_volumes = existing_task_definition.get('volumes', []) or []
+
+ if len(requested_volumes) != len(existing_volumes):
+ # Nope.
+ return None
+
+ if len(requested_volumes) > 0:
+ for requested_vol in requested_volumes:
+ found = False
+
+ for actual_vol in existing_volumes:
+ if _right_has_values_of_left(requested_vol, actual_vol):
+ found = True
+ break
+
+ if not found:
+ return None
+
+            existing_containers = existing_task_definition.get('containerDefinitions', []) or []
+
+ if len(requested_containers) != len(existing_containers):
+ # Nope.
+ return None
+
+ for requested_container in requested_containers:
+ found = False
+
+ for actual_container in existing_containers:
+ if _right_has_values_of_left(requested_container, actual_container):
+ found = True
+ break
+
+ if not found:
+ return None
+
+ return existing_task_definition
+
+ # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
+ for td in existing_definitions_in_family:
+ requested_volumes = module.params['volumes'] or []
+ requested_containers = module.params['containers'] or []
+ requested_task_role_arn = module.params['task_role_arn']
+ requested_launch_type = module.params['launch_type']
+ existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td)
+
+ if existing:
+ break
+
+ if existing and not module.params.get('force_create'):
+ # Awesome. Have an existing one. Nothing to do.
+ results['taskdefinition'] = existing
+ else:
+ if not module.check_mode:
+ # Doesn't exist. create it.
+ volumes = module.params.get('volumes', []) or []
+ results['taskdefinition'] = task_mgr.register_task(module.params['family'],
+ module.params['task_role_arn'],
+ module.params['execution_role_arn'],
+ module.params['network_mode'],
+ module.params['containers'],
+ volumes,
+ module.params['launch_type'],
+ module.params['cpu'],
+ module.params['memory'],
+ module.params['placement_constraints'],)
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+        # When de-registering a task definition, we can specify the ARN OR the family and revision.
+        if module.params['arn'] is not None:
+            task_to_describe = module.params['arn']
+        elif module.params['family'] is not None and module.params['revision'] is not None:
+            task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
+        else:
+            module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
+
+ existing = task_mgr.describe_task(task_to_describe)
+
+        if existing:
+ # It exists, so we should delete it and mark changed. Return info about the task definition deleted
+ results['taskdefinition'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ task_mgr.deregister_task(task_to_describe)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
new file mode 100644
index 000000000..6fbc41731
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition_info
+version_added: 1.0.0
+short_description: Describe a task definition in ECS
+notes:
+ - For details of the parameters and returns see
+ U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
+description:
+ - Describes a task definition in ECS.
+author:
+ - Gustavo Maia (@gurumaia)
+ - Mark Chance (@Java1Guy)
+ - Darek Kaczynski (@kaczynskid)
+options:
+ task_definition:
+ description:
+      - The name of the task definition to get details for.
+ required: true
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.ecs_taskdefinition_info:
+ task_definition: test-td
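+
+# A sketch of consuming the returned data; the task_info register name is an
+# illustrative assumption.
+- community.aws.ecs_taskdefinition_info:
+    task_definition: test-td
+  register: task_info
+
+- ansible.builtin.debug:
+    msg: "{{ task_info.network_mode }}"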
+'''
+
+RETURN = '''
+container_definitions:
+ description: Returns a list of complex objects representing the containers
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: The name of a container.
+ returned: always
+ type: str
+ image:
+ description: The image used to start a container.
+ returned: always
+ type: str
+ cpu:
+ description: The number of cpu units reserved for the container.
+ returned: always
+ type: int
+ memoryReservation:
+ description: The soft limit (in MiB) of memory to reserve for the container.
+ returned: when present
+ type: int
+ links:
+ description: Links to other containers.
+ returned: when present
+ type: str
+ portMappings:
+ description: The list of port mappings for the container.
+ returned: always
+ type: complex
+ contains:
+ containerPort:
+ description: The port number on the container.
+ returned: when present
+ type: int
+ hostPort:
+ description: The port number on the container instance to reserve for your container.
+ returned: when present
+ type: int
+ protocol:
+ description: The protocol used for the port mapping.
+ returned: when present
+ type: str
+ essential:
+ description: Whether this is an essential container or not.
+ returned: always
+ type: bool
+ entryPoint:
+ description: The entry point that is passed to the container.
+ returned: when present
+ type: str
+ command:
+ description: The command that is passed to the container.
+ returned: when present
+ type: str
+ environment:
+ description: The environment variables to pass to a container.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the environment variable.
+ returned: when present
+ type: str
+ value:
+ description: The value of the environment variable.
+ returned: when present
+ type: str
+ mountPoints:
+ description: The mount points for data volumes in your container.
+ returned: always
+ type: complex
+ contains:
+ sourceVolume:
+ description: The name of the volume to mount.
+ returned: when present
+ type: str
+ containerPath:
+ description: The path on the container to mount the host volume at.
+ returned: when present
+ type: str
+ readOnly:
+        description: If this value is true, the container has read-only access to the volume.
+          If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ volumesFrom:
+ description: Data volumes to mount from another container.
+ returned: always
+ type: complex
+ contains:
+ sourceContainer:
+ description: The name of another container within the same task definition to mount volumes from.
+ returned: when present
+ type: str
+ readOnly:
+        description: If this value is true, the container has read-only access to the volume.
+          If this value is false, then the container can write to the volume.
+ returned: when present
+ type: bool
+ hostname:
+ description: The hostname to use for your container.
+ returned: when present
+ type: str
+ user:
+ description: The user name to use inside the container.
+ returned: when present
+ type: str
+ workingDirectory:
+ description: The working directory in which to run commands inside the container.
+ returned: when present
+ type: str
+ disableNetworking:
+ description: When this parameter is true, networking is disabled within the container.
+ returned: when present
+ type: bool
+ privileged:
+ description: When this parameter is true, the container is given elevated
+ privileges on the host container instance (similar to the root user).
+ returned: when present
+ type: bool
+ readonlyRootFilesystem:
+ description: When this parameter is true, the container is given read-only access to its root file system.
+ returned: when present
+ type: bool
+ dnsServers:
+ description: A list of DNS servers that are presented to the container.
+ returned: when present
+ type: str
+ dnsSearchDomains:
+ description: A list of DNS search domains that are presented to the container.
+ returned: when present
+ type: str
+ extraHosts:
+ description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
+ returned: when present
+ type: complex
+ contains:
+ hostname:
+ description: The hostname to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ ipAddress:
+ description: The IP address to use in the /etc/hosts entry.
+ returned: when present
+ type: str
+ dockerSecurityOptions:
+ description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
+ returned: when present
+ type: str
+ dockerLabels:
+ description: A key/value map of labels to add to the container.
+ returned: when present
+ type: str
+ ulimits:
+ description: A list of ulimits to set in the container.
+ returned: when present
+ type: complex
+ contains:
+ name:
+        description: The type of the ulimit.
+ returned: when present
+ type: str
+ softLimit:
+ description: The soft limit for the ulimit type.
+ returned: when present
+ type: int
+ hardLimit:
+ description: The hard limit for the ulimit type.
+ returned: when present
+ type: int
+ logConfiguration:
+ description: The log configuration specification for the container.
+ returned: when present
+ type: str
+ options:
+ description: The configuration options to send to the log driver.
+ returned: when present
+ type: str
+ healthCheck:
+ description: The container health check command and associated configuration parameters for the container.
+ returned: when present
+ type: dict
+ contains:
+ command:
+ description: A string array representing the command that the container runs to determine if it is healthy.
+ type: list
+ interval:
+ description: The time period in seconds between each health check execution.
+ type: int
+ timeout:
+ description: The time period in seconds to wait for a health check to succeed before it is considered a failure.
+ type: int
+ retries:
+ description: The number of times to retry a failed health check before the container is considered unhealthy.
+ type: int
+ startPeriod:
+        description: The optional grace period (in seconds) to provide containers time to bootstrap
+          before failed health checks count towards the maximum number of retries.
+ type: int
+ resourceRequirements:
+ description: The type and amount of a resource to assign to a container.
+ returned: when present
+ type: dict
+ contains:
+ value:
+ description: The value for the specified resource type.
+ type: str
+ type:
+ description: The type of resource to assign to a container.
+ type: str
+ systemControls:
+ description: A list of namespaced kernel parameters to set in the container.
+ returned: when present
+ type: dict
+ contains:
+ namespace:
+        description: The namespaced kernel parameter for which to set a value.
+ type: str
+ value:
+ description: The value for the namespaced kernel.
+ type: str
+ firelensConfiguration:
+ description: The FireLens configuration for the container.
+ returned: when present
+ type: dict
+ contains:
+ type:
+ description: The log router.
+ type: str
+ options:
+ description: The options to use when configuring the log router.
+ type: dict
+family:
+ description: The family of your task definition, used as the definition name
+ returned: always
+ type: str
+task_definition_arn:
+ description: ARN of the task definition
+ returned: always
+ type: str
+task_role_arn:
+ description: The ARN of the IAM role that containers in this task can assume
+ returned: when role is set
+ type: str
+network_mode:
+ description: Network mode for the containers
+ returned: always
+ type: str
+revision:
+ description: Revision number that was queried
+ returned: always
+ type: int
+volumes:
+ description: The list of volumes in a task
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the volume.
+ returned: when present
+ type: str
+ host:
+ description: The contents of the host parameter determine whether your data volume
+ persists on the host container instance and where it is stored.
+ returned: when present
+ type: bool
+ source_path:
+ description: The path on the host container instance that is presented to the container.
+ returned: when present
+ type: str
+status:
+ description: The status of the task definition
+ returned: always
+ type: str
+requires_attributes:
+ description: The container instance attributes required by your task
+ returned: when present
+ type: complex
+ contains:
+ name:
+ description: The name of the attribute.
+ returned: when present
+ type: str
+ value:
+ description: The value of the attribute.
+ returned: when present
+ type: str
+ targetType:
+ description: The type of the target with which to attach the attribute.
+ returned: when present
+ type: str
+ targetId:
+ description: The ID of the target.
+ returned: when present
+ type: str
+placement_constraints:
+ description: A list of placement constraint objects to use for tasks
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of constraint.
+ returned: when present
+ type: str
+ expression:
+ description: A cluster query language expression to apply to the constraint.
+ returned: when present
+ type: str
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ task_definition=dict(required=True, type='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ ecs = module.client('ecs')
+
+ try:
+ ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
+ except botocore.exceptions.ClientError:
+ ecs_td = {}
+
+ module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/efs.py b/ansible_collections/community/aws/plugins/modules/efs.py
new file mode 100644
index 000000000..de1d563fb
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/efs.py
@@ -0,0 +1,786 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs
+version_added: 1.0.0
+short_description: create and maintain EFS file systems
+description:
+  - Module allows creating, searching, and destroying Amazon EFS file systems.
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+ - "Artem Kazakov (@akazakov)"
+options:
+ encrypt:
+ description:
+      - If I(encrypt=true) creates an encrypted file system. This cannot be modified after the file system is created.
+ type: bool
+ default: false
+ kms_key_id:
+ description:
+ - The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only
+ required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for
+ Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN.
+ type: str
+ state:
+ description:
+      - Allows creating, searching, and destroying an Amazon EFS file system.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ name:
+ description:
+ - Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete.
+ type: str
+ id:
+ description:
+ - ID of Amazon EFS. Either name or ID required for delete.
+ type: str
+ performance_mode:
+ description:
+ - File system's performance mode to use. Only takes effect during creation.
+ default: 'general_purpose'
+ choices: ['general_purpose', 'max_io']
+ type: str
+ tags:
+ description:
+ - "List of tags of Amazon EFS. Should be defined as dictionary
+ In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
+ type: dict
+ targets:
+ description:
+ - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
+ This data may be modified for existing EFS using state 'present' and new list of mount targets."
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ subnet_id:
+ required: true
+ description: The ID of the subnet to add the mount target in.
+ ip_address:
+ type: str
+ description: A valid IPv4 address within the address range of the specified subnet.
+ security_groups:
+ type: list
+ elements: str
+            description: List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as the subnet specified.
+ throughput_mode:
+ description:
+ - The throughput_mode for the file system to be created.
+ choices: ['bursting', 'provisioned']
+ type: str
+ provisioned_throughput_in_mibps:
+ description:
+      - If the throughput_mode is provisioned, select the amount of throughput to provision, in Mibps.
+ type: float
+ wait:
+ description:
+ - "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
+ In case of 'absent' state should wait for EFS 'deleted' life cycle state"
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
+ default: 0
+ type: int
+ transition_to_ia:
+ description:
+ - How many days before objects transition to the lower-cost EFS Infrequent Access (IA) storage class.
+      - If set to the string C(None), any existing lifecycle policy will be removed, and objects will not transition
+ to an IA storage class.
+ - If this parameter is absent, any existing lifecycle policy will not be affected.
+ choices: ['None', '7', '14', '30', '60', '90']
+ type: str
+ version_added: 2.1.0
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: EFS provisioning
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ tags:
+ Name: myTestNameTag
+ purpose: file-storage
+ targets:
+ - subnet_id: subnet-748c5d03
+ security_groups: [ "sg-1a2b3c4d" ]
+
+- name: Modifying EFS data
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myAnotherTestTag
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+- name: Set a lifecycle policy
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ transition_to_ia: 7
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+- name: Remove a lifecycle policy
+ community.aws.efs:
+ state: present
+ name: myTestEFS
+ transition_to_ia: None
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+- name: Deleting EFS
+ community.aws.efs:
+ state: absent
+ name: myTestEFS
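+
+# A sketch of the throughput_mode/provisioned_throughput_in_mibps options
+# documented above; the value of 64.0 is an illustrative assumption.
+- name: Create EFS with provisioned throughput
+  community.aws.efs:
+    state: present
+    name: myTestEFS
+    throughput_mode: provisioned
+    provisioned_throughput_in_mibps: 64.0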
+'''
+
+RETURN = r'''
+creation_time:
+ description: timestamp of creation date
+ returned: always
+ type: str
+ sample: "2015-11-16 07:30:57-05:00"
+creation_token:
+ description: EFS creation token
+ returned: always
+ type: str
+ sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
+file_system_id:
+ description: ID of the file system
+ returned: always
+ type: str
+ sample: "fs-xxxxxxxx"
+life_cycle_state:
+ description: state of the EFS file system
+ returned: always
+ type: str
+ sample: "creating, available, deleting, deleted"
+mount_point:
+  description: URL of the file system, with a leading dot, from the time when AWS EFS required a region suffix to be added to the address
+ returned: always
+ type: str
+ sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
+filesystem_address:
+  description: URL of the file system, valid for use with mount
+ returned: always
+ type: str
+ sample: "fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
+mount_targets:
+ description: list of mount targets
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "123456789012",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned: always
+ type: str
+ sample: "my-efs"
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned: always
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned: always
+ type: str
+ sample: "XXXXXXXXXXXX"
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned: always
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned: always
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned: always
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+from time import sleep
+from time import time as timestamp
+
+try:
+ import botocore
+except ImportError as e:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+def _index_by_key(key, items):
+ return dict((item[key], item) for item in items)
+
+
+class EFSConnection(object):
+
+ DEFAULT_WAIT_TIMEOUT_SECONDS = 0
+
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module):
+ self.connection = module.client('efs')
+ region = module.region
+
+ self.module = module
+ self.region = region
+ self.wait = module.params.get('wait')
+ self.wait_timeout = module.params.get('wait_timeout')
+
+ def get_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ items = iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ )
+ for item in items:
+ item['Name'] = item['CreationToken']
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
+ AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
+ And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
+ AWS documentation is available here:
+ https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if item['LifeCycleState'] == self.STATE_AVAILABLE:
+ item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+ item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ else:
+ item['Tags'] = {}
+ item['MountTargets'] = []
+ yield item
+
+ def get_tags(self, **kwargs):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ tags = self.connection.describe_tags(**kwargs)['Tags']
+ return tags
+
+ def get_mount_targets(self, **kwargs):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ **kwargs
+ )
+ for target in targets:
+ if target['LifeCycleState'] == self.STATE_AVAILABLE:
+ target['SecurityGroups'] = list(self.get_security_groups(
+ MountTargetId=target['MountTargetId']
+ ))
+ else:
+ target['SecurityGroups'] = []
+ yield target
+
+ def get_security_groups(self, **kwargs):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return iterate_all(
+ 'SecurityGroups',
+ self.connection.describe_mount_target_security_groups,
+ **kwargs
+ )
+
+ def get_file_system_id(self, name):
+ """
+ Returns ID of instance by instance name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name
+ ))
+ return info and info['FileSystemId'] or None
+
+ def get_file_system_state(self, name, file_system_id=None):
+ """
+ Returns state of filesystem by EFS id/name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ FileSystemId=file_system_id
+ ))
+ return info and info['LifeCycleState'] or self.STATE_DELETED
+
+ def get_mount_targets_in_state(self, file_system_id, states=None):
+ """
+ Returns states of mount targets of selected EFS with selected state(s) (optional)
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ FileSystemId=file_system_id
+ )
+
+ if states:
+ if not isinstance(states, list):
+ states = [states]
+ targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+
+ return list(targets)
+
+ def get_throughput_mode(self, **kwargs):
+ """
+ Returns throughput mode for selected EFS instance
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ ))
+
+ return info and info['ThroughputMode'] or None
+
+ def get_provisioned_throughput_in_mibps(self, **kwargs):
+ """
+        Returns provisioned throughput for selected EFS instance
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ ))
+ return info.get('ProvisionedThroughputInMibps', None)
+
+ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Creates new filesystem with selected name
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ params = {}
+ params['CreationToken'] = name
+ params['PerformanceMode'] = performance_mode
+ if encrypt:
+ params['Encrypted'] = encrypt
+ if kms_key_id is not None:
+ params['KmsKeyId'] = kms_key_id
+ if throughput_mode:
+ params['ThroughputMode'] = throughput_mode
+ if provisioned_throughput_in_mibps:
+ params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+
+ if state in [self.STATE_DELETING, self.STATE_DELETED]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED
+ )
+ try:
+ self.connection.create_file_system(**params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to create file system.")
+
+ # we always wait for the state to be available when creating.
+ # if we try to take any actions on the file system before it's available
+ # we'll throw errors
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+
+ return changed
+
+ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Update filesystem with new throughput settings
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
+ fs_id = self.get_file_system_id(name)
+ current_mode = self.get_throughput_mode(FileSystemId=fs_id)
+ current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id)
+ params = dict()
+ if throughput_mode and throughput_mode != current_mode:
+ params['ThroughputMode'] = throughput_mode
+ if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput:
+ params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+ if len(params) > 0:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+ try:
+ self.connection.update_file_system(FileSystemId=fs_id, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to update file system.")
+ return changed
+
+ def update_lifecycle_policy(self, name, transition_to_ia):
+ """
+ Update filesystem with new lifecycle policy.
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
+ fs_id = self.get_file_system_id(name)
+ current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id)
+ if transition_to_ia == 'None':
+ LifecyclePolicies = []
+ else:
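+                # The EFS API expects TransitionToIA values of the form 'AFTER_7_DAYS'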
+ LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}]
+ if current_policies.get('LifecyclePolicies') != LifecyclePolicies:
+                self.connection.put_lifecycle_configuration(
+ FileSystemId=fs_id,
+ LifecyclePolicies=LifecyclePolicies,
+ )
+ changed = True
+ return changed
+
+ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps):
+ """
+ Change attributes (mount targets and tags) of filesystem by name
+ """
+ result = False
+ fs_id = self.get_file_system_id(name)
+
+ if tags is not None:
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
+
+ if tags_to_delete:
+ try:
+ self.connection.delete_tags(
+ FileSystemId=fs_id,
+ TagKeys=tags_to_delete
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to delete tags.")
+
+ result = True
+
+ if tags_need_modify:
+ try:
+ self.connection.create_tags(
+ FileSystemId=fs_id,
+ Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Unable to create tags.")
+
+ result = True
+
+ if targets is not None:
+ incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
+ targets = _index_by_key('SubnetId', targets)
+
+ targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
+ targets, True)
+
+ # To modify mount target it should be deleted and created again
+ changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
+ current_targets[sid], targets[sid])]
+ targets_to_delete = list(targets_to_delete) + changed
+ targets_to_create = list(targets_to_create) + changed
+
+ if targets_to_delete:
+ for sid in targets_to_delete:
+ self.connection.delete_mount_target(
+ MountTargetId=current_targets[sid]['MountTargetId']
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ result = True
+
+ if targets_to_create:
+ for sid in targets_to_create:
+ self.connection.create_mount_target(
+ FileSystemId=fs_id,
+ **targets[sid]
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0,
+ self.wait_timeout
+ )
+ result = True
+
+ # If no security groups were passed into the module, then do not change it.
+ security_groups_to_update = [sid for sid in intersection if
+ 'SecurityGroups' in targets[sid] and
+ current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
+
+ if security_groups_to_update:
+ for sid in security_groups_to_update:
+ self.connection.modify_mount_target_security_groups(
+ MountTargetId=current_targets[sid]['MountTargetId'],
+ SecurityGroups=targets[sid].get('SecurityGroups', None)
+ )
+ result = True
+
+ return result
+
+ def delete_file_system(self, name, file_system_id=None):
+ """
+ Removes EFS instance by id/name
+ """
+ result = False
+ state = self.get_file_system_state(name, file_system_id)
+ if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE
+ )
+ if not file_system_id:
+ file_system_id = self.get_file_system_id(name)
+ self.delete_mount_targets(file_system_id)
+ self.connection.delete_file_system(FileSystemId=file_system_id)
+ result = True
+
+ if self.wait:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED,
+ self.wait_timeout
+ )
+
+ return result
+
+ def delete_mount_targets(self, file_system_id):
+ """
+ Removes mount targets by EFS id
+ """
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
+ 0
+ )
+
+ targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
+ for target in targets:
+ self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
+ 0
+ )
+
+ return len(targets) > 0
+
+
+def iterate_all(attr, map_method, **kwargs):
+ """
+    Creates an iterator over a paginated result set, retrying with exponential backoff on throttling
+ """
+ args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+ wait = 1
+ while True:
+ try:
+ data = map_method(**args)
+ for elm in data[attr]:
+ yield elm
+ if 'NextMarker' in data:
+            args['Marker'] = data['NextMarker']
+ continue
+ break
+ except is_boto3_error_code('ThrottlingException'):
+ if wait < 600:
+ sleep(wait)
+ wait = wait * 2
+ continue
+ else:
+ raise
+
+
+def targets_equal(keys, a, b):
+ """
+    Compares two mount targets by the specified attributes
+ """
+ for key in keys:
+ if key in b and a[key] != b[key]:
+ return False
+
+ return True
+
+
+def dict_diff(dict1, dict2, by_key=False):
+ """
+ Helper method to calculate difference of two dictionaries
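+
+    For example (with by_key=True), dict_diff({'a': 1}, {'a': 2, 'b': 3}, True)
+    returns ({'b'}, {'a'}, set()): keys to create, intersection, keys to delete.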
+ """
+ keys1 = set(dict1.keys() if by_key else dict1.items())
+ keys2 = set(dict2.keys() if by_key else dict2.items())
+
+ intersection = keys1 & keys2
+
+ return keys2 ^ intersection, intersection, keys1 ^ intersection
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
+ """
+ Helper method to wait for desired value returned by callback method
+ """
+ wait_start = timestamp()
+ while True:
+ if callback() != value:
+ if timeout != 0 and (timestamp() - wait_start > timeout):
+ raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+ else:
+ sleep(5)
+ continue
+ break
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ encrypt=dict(required=False, type="bool", default=False),
+ state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
+ kms_key_id=dict(required=False, type='str', default=None),
+ purge_tags=dict(default=True, type='bool'),
+ id=dict(required=False, type='str', default=None),
+ name=dict(required=False, type='str', default=None),
+ tags=dict(required=False, type="dict", aliases=['resource_tags']),
+ targets=dict(required=False, type="list", default=[], elements='dict'),
+ performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
+ transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None),
+ throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
+ provisioned_throughput_in_mibps=dict(required=False, type='float'),
+ wait=dict(required=False, type="bool", default=False),
+ wait_timeout=dict(required=False, type="int", default=0)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ connection = EFSConnection(module)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ target_translations = {
+ 'ip_address': 'IpAddress',
+ 'security_groups': 'SecurityGroups',
+ 'subnet_id': 'SubnetId'
+ }
+ targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+ performance_mode_translations = {
+ 'general_purpose': 'generalPurpose',
+ 'max_io': 'maxIO'
+ }
+ encrypt = module.params.get('encrypt')
+ kms_key_id = module.params.get('kms_key_id')
+ performance_mode = performance_mode_translations[module.params.get('performance_mode')]
+ purge_tags = module.params.get('purge_tags')
+ transition_to_ia = module.params.get('transition_to_ia')
+ throughput_mode = module.params.get('throughput_mode')
+ provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
+ state = str(module.params.get('state')).lower()
+ changed = False
+
+ if state == 'present':
+ if not name:
+ module.fail_json(msg='Name parameter is required for create')
+
+ changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
+ changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
+ changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
+ throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
+ if transition_to_ia:
+ changed |= connection.update_lifecycle_policy(name, transition_to_ia)
+ result = first_or_default(connection.get_file_systems(CreationToken=name))
+
+ elif state == 'absent':
+ if not name and not fs_id:
+ module.fail_json(msg='Either name or id parameter is required for delete')
+
+ changed = connection.delete_file_system(name, fs_id)
+ result = None
+ if result:
+ result = camel_dict_to_snake_dict(result)
+ module.exit_json(changed=changed, efs=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/efs_info.py b/ansible_collections/community/aws/plugins/modules/efs_info.py
new file mode 100644
index 000000000..5ef436f3c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/efs_info.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs_info
+version_added: 1.0.0
+short_description: Get information about Amazon EFS file systems
+description:
+ - This module can be used to search Amazon EFS file systems.
+ Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)!
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+options:
+ name:
+ description:
+ - Creation Token of Amazon EFS file system.
+ aliases: [ creation_token ]
+ type: str
+ id:
+ description:
+ - ID of Amazon EFS.
+ type: str
+ tags:
+ description:
+      - Tags of the Amazon EFS file system, defined as a dictionary.
+ type: dict
+ default: {}
+ targets:
+ description:
+ - List of targets on which to filter the returned results.
+ - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
+ type: list
+ elements: str
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Find all existing efs
+ community.aws.efs_info:
+ register: result
+
+- name: Find efs using id
+ community.aws.efs_info:
+ id: fs-1234abcd
+ register: result
+
+- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
+ community.aws.efs_info:
+ tags:
+ Name: myTestNameTag
+ targets:
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result['efs'] }}"
+'''
+
+RETURN = r'''
+creation_time:
+ description: timestamp of creation date
+ returned: always
+ type: str
+ sample: "2015-11-16 07:30:57-05:00"
+creation_token:
+ description: EFS creation token
+ returned: always
+ type: str
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned: always
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+  description: URL of the file system, with a leading dot, from the time AWS EFS required a network suffix to be added to the address
+ returned: always
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+filesystem_address:
+  description: URL of the file system, valid for use with mount
+ returned: always
+ type: str
+ sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "123456789012",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned: always
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned: always
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned: always
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned: always
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned: always
+ type: str
+ sample: "generalPurpose"
+throughput_mode:
+ description: mode of throughput for the file system
+ returned: always
+ type: str
+ sample: "bursting"
+provisioned_throughput_in_mibps:
+ description: throughput provisioned in Mibps
+ returned: when throughput_mode is set to "provisioned"
+ type: float
+ sample: 15.0
+tags:
+ description: tags on the efs instance
+ returned: always
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+
+from collections import defaultdict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class EFSConnection(object):
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module):
+ try:
+ self.connection = module.client('efs')
+ self.module = module
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
+
+ self.region = module.region
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def list_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ paginator = self.connection.get_paginator('describe_file_systems')
+ return paginator.paginate(**kwargs).build_full_result()['FileSystems']
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_tags(self, file_system_id):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_tags')
+ return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
+
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_mount_targets(self, file_system_id):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ paginator = self.connection.get_paginator('describe_mount_targets')
+ return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
+
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
+ def get_security_groups(self, mount_target_id):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
+
+ def get_mount_targets_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ mount_targets = self.get_mount_targets(item['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
+ for mt in mount_targets:
+ item['mount_targets'].append(camel_dict_to_snake_dict(mt))
+ return file_systems
+
+ def get_security_groups_data(self, file_systems):
+ for item in file_systems:
+ if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ for target in item['mount_targets']:
+ if target['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ target['security_groups'] = self.get_security_groups(target['mount_target_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
+ else:
+ target['security_groups'] = []
+ else:
+ item['tags'] = {}
+ item['mount_targets'] = []
+ return file_systems
+
+ def get_file_systems(self, file_system_id=None, creation_token=None):
+ kwargs = dict()
+ if file_system_id:
+ kwargs['FileSystemId'] = file_system_id
+ if creation_token:
+ kwargs['CreationToken'] = creation_token
+ try:
+ file_systems = self.list_file_systems(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
+
+ results = list()
+ for item in file_systems:
+ item['CreationTime'] = str(item['CreationTime'])
+            """
+            When MountPoint was introduced it had to be prefixed (hence the
+            leading dot) to form a usable network path. AWS has since removed
+            that requirement; MountPoint is kept for backwards compatibility,
+            and the new FilesystemAddress value is provided for direct use
+            with other modules (e.g. mount).
+            AWS documentation is available here:
+            U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
+            """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
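+            # For example, file system fs-123456ab in us-east-1 yields:
+            #   MountPoint:         .fs-123456ab.efs.us-east-1.amazonaws.com:/
+            #   FilesystemAddress:  fs-123456ab.efs.us-east-1.amazonaws.com:/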
+
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ result = camel_dict_to_snake_dict(item)
+ result['tags'] = {}
+ result['mount_targets'] = []
+ # Set tags *after* doing camel to snake
+ if result['life_cycle_state'] == self.STATE_AVAILABLE:
+ try:
+ result['tags'] = self.get_tags(result['file_system_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
+ results.append(result)
+ return results
+
+
+def prefix_to_attr(attr_id):
+ """
+ Helper method to convert ID prefix to mount target attribute
+ """
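+    # e.g. prefix_to_attr('fsmt-0123abcd') -> 'mount_target_id';
+    # IDs with no known prefix fall back to 'ip_address'.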
+ attr_by_prefix = {
+ 'fsmt-': 'mount_target_id',
+ 'subnet-': 'subnet_id',
+ 'eni-': 'network_interface_id',
+ 'sg-': 'security_groups'
+ }
+ return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
+ if str(attr_id).startswith(prefix)], 'ip_address')
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def has_tags(available, required):
+ """
+ Helper method to determine if tag requested already exists
+ """
+ for key, value in required.items():
+ if key not in available or value != available[key]:
+ return False
+ return True
+
+
+def has_targets(available, required):
+ """
+ Helper method to determine if mount target requested already exists
+ """
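+    # e.g. has_targets([{'subnet_id': 'subnet-aaaa1111'}],
+    #                  [('subnet-aaaa1111', 'subnet_id')]) -> True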
+ grouped = group_list_of_dict(available)
+ for (value, field) in required:
+ if field not in grouped or value not in grouped[field]:
+ return False
+ return True
+
+
+def group_list_of_dict(array):
+ """
+ Helper method to group list of dict to dict with all possible values
+ """
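+    # e.g. group_list_of_dict([{'a': 1}, {'a': [2, 3], 'b': 4}])
+    #      -> {'a': [1, 2, 3], 'b': [4]}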
+ result = defaultdict(list)
+ for item in array:
+ for key, value in item.items():
+ result[key] += value if isinstance(value, list) else [value]
+ return result
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ id=dict(),
+ name=dict(aliases=['creation_token']),
+ tags=dict(type="dict", default={}),
+ targets=dict(type="list", default=[], elements='str')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ connection = EFSConnection(module)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ targets = module.params.get('targets')
+
+ file_systems_info = connection.get_file_systems(fs_id, name)
+
+ if tags:
+ file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
+
+ file_systems_info = connection.get_mount_targets_data(file_systems_info)
+ file_systems_info = connection.get_security_groups_data(file_systems_info)
+
+ if targets:
+ targets = [(item, prefix_to_attr(item)) for item in targets]
+ file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
+
+ module.exit_json(changed=False, efs=file_systems_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/efs_tag.py b/ansible_collections/community/aws/plugins/modules/efs_tag.py
new file mode 100644
index 000000000..1529fa944
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/efs_tag.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+"""
+Copyright: (c) 2021, Milan Zink <zeten30@gmail.com>
+GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: efs_tag
+version_added: 2.0.0
+short_description: Create and remove tags on Amazon EFS resources
+description:
+ - Creates and removes tags for Amazon EFS resources.
+ - Resources are referenced by their ID (filesystem or filesystem access point).
+author:
+ - Milan Zink (@zeten30)
+options:
+ resource:
+ description:
+ - EFS Filesystem ID or EFS Filesystem Access Point ID.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether the tags should be present or absent on the resource.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+ - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value.
+ type: dict
+ required: True
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+ - Whether unspecified tags should be removed from the resource.
+ - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure tags are present on a resource
+ community.aws.efs_tag:
+ resource: fs-123456ab
+ state: present
+ tags:
+ Name: MyEFS
+ Env: Production
+
+- name: Remove the Env tag if it's currently 'development'
+ community.aws.efs_tag:
+ resource: fsap-78945ff
+ state: absent
+ tags:
+ Env: development
+
+- name: Remove all tags except for Name
+ community.aws.efs_tag:
+ resource: fsap-78945ff
+ state: absent
+ tags:
+ Name: foo
+ purge_tags: true
+
+- name: Remove all tags
+ community.aws.efs_tag:
+ resource: fsap-78945ff
+ state: absent
+ tags: {}
+ purge_tags: true
+'''
+
+RETURN = r'''
+tags:
+    description: A dict containing the tags on the resource.
+    returned: always
+    type: dict
+added_tags:
+    description: A dict of tags that were added to the resource.
+    returned: when tags are added
+    type: dict
+removed_tags:
+    description: A dict of tags that were removed from the resource.
+    returned: when tags are removed
+    type: dict
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ # Handled by AnsibleAWSModule
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing
+WAIT_RETRY = 5 # how many seconds to wait between propagation status polls
+
+
+def get_tags(efs, module, resource):
+ '''
+ Get resource tags
+ '''
+ try:
+ return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags'])
+ except (BotoCoreError, ClientError) as get_tags_error:
+ module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource))
+
+
+def main():
+ '''
+    Module entry point
+ '''
+ argument_spec = dict(
+ resource=dict(required=True),
+ tags=dict(type='dict', required=True, aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ resource = module.params['resource']
+ tags = module.params['tags']
+ state = module.params['state']
+ purge_tags = module.params['purge_tags']
+
+ result = {'changed': False}
+
+ efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff())
+
+ current_tags = get_tags(efs, module, resource)
+
+ add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
+
+ remove_tags = {}
+
+ if state == 'absent':
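+        # A tag is removed only if it appears in 'tags' with a null value or
+        # its current value matches the requested value exactly, plus any
+        # keys compare_aws_tags flagged for purging.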
+ for key in tags:
+ if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
+ remove_tags[key] = current_tags[key]
+
+ for key in remove:
+ remove_tags[key] = current_tags[key]
+
+ if remove_tags:
+ result['changed'] = True
+ result['removed_tags'] = remove_tags
+ if not module.check_mode:
+ try:
+ efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys()))
+ except (BotoCoreError, ClientError) as remove_tag_error:
+ module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+
+ if state == 'present' and add_tags:
+ result['changed'] = True
+ result['added_tags'] = add_tags
+ current_tags.update(add_tags)
+ if not module.check_mode:
+ try:
+ tags = ansible_dict_to_boto3_tag_list(add_tags)
+ efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags)
+ except (BotoCoreError, ClientError) as set_tag_error:
+ module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+
+ result['tags'] = get_tags(efs, module, resource)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/eks_cluster.py b/ansible_collections/community/aws/plugins/modules/eks_cluster.py
new file mode 100644
index 000000000..18a5055e9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/eks_cluster.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: eks_cluster
+version_added: 1.0.0
+short_description: Manage Elastic Kubernetes Service (EKS) Clusters
+description:
+ - Manage Elastic Kubernetes Service (EKS) Clusters.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_eks_cluster).
+ The usage did not change.
+author:
+ - Will Thames (@willthames)
+options:
+ name:
+ description: Name of the EKS cluster.
+ required: True
+ type: str
+ version:
+ description:
+ - Kubernetes version.
+ - Defaults to C(latest).
+ type: str
+ role_arn:
+ description: ARN of IAM role used by the EKS cluster.
+ type: str
+ subnets:
+ description: List of subnet IDs for the Kubernetes cluster.
+ type: list
+ elements: str
+ security_groups:
+ description: List of security group names or IDs.
+ type: list
+ elements: str
+ state:
+ description: Desired state of the EKS cluster.
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ tags:
+ description:
+            - A dictionary of tags to add to the EKS cluster.
+ type: dict
+ version_added: 5.3.0
+ wait:
+ description: >-
+ Specifies whether the module waits until the cluster is active or deleted
+ before moving on. It takes "usually less than 10 minutes" per AWS documentation.
+ type: bool
+ default: false
+ wait_timeout:
+ description: >-
+ The duration in seconds to wait for the cluster to become active. Defaults
+ to 1200 seconds (20 minutes).
+ default: 1200
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create an EKS cluster
+ community.aws.eks_cluster:
+ name: my_cluster
+    version: "1.14"
+ role_arn: my_eks_role
+ subnets:
+ - subnet-aaaa1111
+ security_groups:
+ - my_eks_sg
+ - sg-abcd1234
+ register: caller_facts
+
+- name: Remove an EKS cluster
+ community.aws.eks_cluster:
+ name: my_cluster
+ wait: true
+ state: absent
+'''
+
+RETURN = r'''
+arn:
+ description: ARN of the EKS cluster
+ returned: when state is present
+ type: str
+ sample: arn:aws:eks:us-west-2:123456789012:cluster/my-eks-cluster
+certificate_authority:
+ description: Dictionary containing Certificate Authority Data for cluster
+ returned: after creation
+ type: complex
+ contains:
+ data:
+ description: Base-64 encoded Certificate Authority Data for cluster
+ returned: when the cluster has been created and is active
+ type: str
+endpoint:
+ description: Kubernetes API server endpoint
+ returned: when the cluster has been created and is active
+ type: str
+ sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com
+created_at:
+ description: Cluster creation date and time
+ returned: when state is present
+ type: str
+ sample: '2018-06-06T11:56:56.242000+00:00'
+name:
+ description: EKS cluster name
+ returned: when state is present
+ type: str
+ sample: my-eks-cluster
+resources_vpc_config:
+ description: VPC configuration of the cluster
+ returned: when state is present
+ type: complex
+ contains:
+ security_group_ids:
+ description: List of security group IDs
+ returned: always
+ type: list
+ sample:
+ - sg-abcd1234
+ - sg-aaaa1111
+ subnet_ids:
+ description: List of subnet IDs
+ returned: always
+ type: list
+ sample:
+ - subnet-abcdef12
+ - subnet-345678ab
+ - subnet-cdef1234
+ vpc_id:
+ description: VPC id
+ returned: always
+ type: str
+ sample: vpc-a1b2c3d4
+role_arn:
+ description: ARN of the IAM role used by the cluster
+ returned: when state is present
+ type: str
+ sample: arn:aws:iam::123456789012:role/eks_cluster_role
+status:
+ description: status of the EKS cluster
+ returned: when state is present
+ type: str
+ sample:
+ - CREATING
+ - ACTIVE
+version:
+ description: Kubernetes version of the cluster
+ returned: when state is present
+ type: str
+ sample: '1.10'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def ensure_present(client, module):
+ name = module.params.get('name')
+ subnets = module.params['subnets']
+ groups = module.params['security_groups']
+ wait = module.params.get('wait')
+ cluster = get_cluster(client, module)
+ try:
+ ec2 = module.client('ec2')
+ vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId']
+ groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't lookup security groups")
+
+ if cluster:
+ if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets):
+ module.fail_json(msg="Cannot modify subnets of existing cluster")
+ if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups):
+ module.fail_json(msg="Cannot modify security groups of existing cluster")
+ if module.params.get('version') and module.params.get('version') != cluster['version']:
+ module.fail_json(msg="Cannot modify version of existing cluster")
+
+ if wait:
+ wait_until(client, module, 'cluster_active')
+ # Ensure that fields that are only available for active clusters are
+ # included in the returned value
+ cluster = get_cluster(client, module)
+
+ module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ try:
+ params = dict(name=name,
+ roleArn=module.params['role_arn'],
+ resourcesVpcConfig=dict(
+ subnetIds=subnets,
+ securityGroupIds=groups),
+ )
+ if module.params['version']:
+ params['version'] = module.params['version']
+ if module.params['tags']:
+ params['tags'] = module.params['tags']
+ cluster = client.create_cluster(**params)['cluster']
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
+
+ if wait:
+ wait_until(client, module, 'cluster_active')
+ # Ensure that fields that are only available for active clusters are
+ # included in the returned value
+ cluster = get_cluster(client, module)
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
+
+
+def ensure_absent(client, module):
+ name = module.params.get('name')
+ existing = get_cluster(client, module)
+ wait = module.params.get('wait')
+ if not existing:
+ module.exit_json(changed=False)
+ if not module.check_mode:
+ try:
+ client.delete_cluster(name=module.params['name'])
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name)
+
+ if wait:
+ wait_until(client, module, 'cluster_deleted')
+
+ module.exit_json(changed=True)
+
+
+def get_cluster(client, module):
+ name = module.params.get('name')
+ try:
+ return client.describe_cluster(name=name)['cluster']
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+ except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except
+ module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
+
+
+def wait_until(client, module, waiter_name='cluster_active'):
+ name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
+ waiter = get_waiter(client, waiter_name)
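+    # Convert the timeout into a poll count: e.g. with wait_timeout=1200 and
+    # a waiter that polls every 30 seconds, this allows 1 + 1200/30 = 41 attempts.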
+ attempts = 1 + int(wait_timeout / waiter.config.delay)
+ waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ version=dict(),
+ role_arn=dict(),
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ tags=dict(type='dict', required=False),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1200, type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
+ supports_check_mode=True,
+ )
+
+ client = module.client('eks')
+
+ if module.params.get('state') == 'present':
+ ensure_present(client, module)
+ else:
+ ensure_absent(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py
new file mode 100644
index 000000000..d78cbbe2d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: eks_fargate_profile
+version_added: 4.0.0
+short_description: Manage EKS Fargate Profile
+description:
+ - Manage EKS Fargate Profile.
+author:
+ - Tiago Jarra (@tjarra)
+options:
+ name:
+ description: Name of EKS Fargate Profile.
+ required: True
+ type: str
+ cluster_name:
+ description: Name of EKS Cluster.
+ required: True
+ type: str
+ role_arn:
+ description:
+ - ARN of IAM role used by the EKS cluster.
+ - Required when I(state=present).
+ type: str
+ subnets:
+ description:
+        - List of subnet IDs for the Kubernetes cluster.
+ - Required when I(state=present).
+ type: list
+ elements: str
+ selectors:
+ description:
+            - A list of selectors to use in the Fargate profile.
+            - Required when I(state=present).
+        type: list
+        elements: dict
+        suboptions:
+            namespace:
+                description: A namespace used in the Fargate profile.
+                type: str
+            labels:
+                description: A dictionary of labels used in the Fargate profile.
+ type: dict
+ default: {}
+ state:
+ description: Create or delete the Fargate Profile.
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ wait:
+ description: >-
+ Specifies whether the module waits until the profile is created or deleted before moving on.
+ type: bool
+ default: false
+ wait_timeout:
+ description: >-
+        The duration in seconds to wait for the profile to become active or deleted. Defaults
+ to 1200 seconds (20 minutes).
+ default: 1200
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create an EKS Fargate Profile
+ community.aws.eks_fargate_profile:
+ name: test_fargate
+ cluster_name: test_cluster
+ role_arn: my_eks_role
+ subnets:
+ - subnet-aaaa1111
+ selectors:
+ - namespace: nm-test
+ labels:
+          label1: test
+ state: present
+ wait: true
+
+- name: Remove an EKS Fargate Profile
+ community.aws.eks_fargate_profile:
+ name: test_fargate
+ cluster_name: test_cluster
+ wait: true
+ state: absent
+'''
+
+RETURN = r'''
+fargate_profile_name:
+ description: Name of Fargate Profile.
+ returned: when state is present
+ type: str
+ sample: test_profile
+fargate_profile_arn:
+ description: ARN of the Fargate Profile.
+ returned: when state is present
+ type: str
+ sample: arn:aws:eks:us-east-1:1231231123:safd
+cluster_name:
+ description: Name of EKS Cluster.
+ returned: when state is present
+ type: str
+ sample: test-cluster
+created_at:
+ description: Fargate Profile creation date and time.
+ returned: when state is present
+ type: str
+ sample: '2022-01-18T20:00:00.111000+00:00'
+pod_execution_role_arn:
+ description: ARN of the IAM Role used by Fargate Profile.
+ returned: when state is present
+ type: str
+    sample: arn:aws:iam::1231231123:role/asdf
+subnets:
+ description: List of subnets used in Fargate Profile.
+ returned: when state is present
+ type: list
+ sample:
+ - subnet-qwerty123
+ - subnet-asdfg456
+selectors:
+ description: Selector configuration.
+ returned: when state is present
+ type: complex
+ contains:
+ namespace:
+ description: Name of the kubernetes namespace used in profile.
+ returned: when state is present
+ type: str
+ sample: nm-test
+ labels:
+                description: Dictionary of Kubernetes labels used in the profile.
+                returned: when state is present
+                type: dict
+                sample:
+                    label1: test1
+                    label2: test2
+tags:
+ description: A dictionary of resource tags.
+ returned: when state is present
+ type: dict
+ sample:
+ foo: bar
+ env: test
+status:
+ description: status of the EKS Fargate Profile.
+ returned: when state is present
+ type: str
+ sample:
+ - CREATING
+ - ACTIVE
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+try:
+ import botocore.exceptions
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def validate_tags(client, module, fargate_profile):
+ changed = False
+
+ desired_tags = module.params.get('tags')
+ if desired_tags is None:
+ return False
+
+ try:
+ existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags']
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name'))
+
+ if tags_to_remove:
+ changed = True
+ if not module.check_mode:
+ try:
+ client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name'))
+
+ if tags_to_add:
+ changed = True
+ if not module.check_mode:
+ try:
+ client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name'))
+
+ return changed
+
+
+def create_or_update_fargate_profile(client, module):
+ name = module.params.get('name')
+ subnets = module.params['subnets']
+ role_arn = module.params['role_arn']
+ cluster_name = module.params['cluster_name']
+ selectors = module.params['selectors']
+ tags = module.params['tags'] or {}
+ wait = module.params.get('wait')
+ fargate_profile = get_fargate_profile(client, module, name, cluster_name)
+
+ if fargate_profile:
+ changed = False
+        if fargate_profile['podExecutionRoleArn'] != role_arn:
+ module.fail_json(msg="Cannot modify Execution Role")
+ if set(fargate_profile['subnets']) != set(subnets):
+ module.fail_json(msg="Cannot modify Subnets")
+ if fargate_profile['selectors'] != selectors:
+ module.fail_json(msg="Cannot modify Selectors")
+
+ changed = validate_tags(client, module, fargate_profile)
+
+ if wait:
+ wait_until(client, module, 'fargate_profile_active', name, cluster_name)
+ fargate_profile = get_fargate_profile(client, module, name, cluster_name)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ check_profiles_status(client, module, cluster_name)
+
+ try:
+ params = dict(fargateProfileName=name,
+ podExecutionRoleArn=role_arn,
+ subnets=subnets,
+ clusterName=cluster_name,
+ selectors=selectors,
+ tags=tags
+ )
+ fargate_profile = client.create_fargate_profile(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name)
+
+ if wait:
+ wait_until(client, module, 'fargate_profile_active', name, cluster_name)
+ fargate_profile = get_fargate_profile(client, module, name, cluster_name)
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile))
+
+
+def delete_fargate_profile(client, module):
+ name = module.params.get('name')
+ cluster_name = module.params['cluster_name']
+ existing = get_fargate_profile(client, module, name, cluster_name)
+ wait = module.params.get('wait')
+ if not existing or existing["status"] == "DELETING":
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ check_profiles_status(client, module, cluster_name)
+ try:
+ client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name)
+
+ if wait:
+ wait_until(client, module, 'fargate_profile_deleted', name, cluster_name)
+
+ module.exit_json(changed=True)
+
+
+def get_fargate_profile(client, module, name, cluster_name):
+ try:
+ return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile']
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get fargate profile")
+
+
+# If any Fargate profile in the cluster is in a transitional state
+# (CREATING/DELETING), wait for it to finish before making further changes
+def check_profiles_status(client, module, cluster_name):
+ try:
+ list_profiles = client.list_fargate_profiles(clusterName=cluster_name)
+
+ for name in list_profiles["fargateProfileNames"]:
+ fargate_profile = get_fargate_profile(client, module, name, cluster_name)
+ if fargate_profile["status"] == 'CREATING':
+ wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name)
+ elif fargate_profile["status"] == 'DELETING':
+ wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't list Fargate profiles for EKS cluster %s" % cluster_name)
+
+
+def wait_until(client, module, waiter_name, name, cluster_name):
+ wait_timeout = module.params.get('wait_timeout')
+ waiter = get_waiter(client, waiter_name)
+ attempts = 1 + int(wait_timeout / waiter.config.delay)
+ try:
+ waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts})
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="An error occurred waiting")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ cluster_name=dict(required=True),
+ role_arn=dict(),
+ subnets=dict(type='list', elements='str'),
+ selectors=dict(type='list', elements='dict', options=dict(
+ namespace=dict(type='str'),
+ labels=dict(type='dict', default={})
+ )),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1200, type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]],
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client('eks')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't connect to AWS")
+
+ if module.params.get('state') == 'present':
+ create_or_update_fargate_profile(client, module)
+ else:
+ delete_fargate_profile(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py
new file mode 100644
index 000000000..78979afc2
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py
@@ -0,0 +1,713 @@
+#!/usr/bin/python
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: eks_nodegroup
+version_added: 5.3.0
+short_description: Manage EKS Nodegroup
+description:
+ - Manage EKS Nodegroup.
+author:
+ - Tiago Jarra (@tjarra)
+options:
+ name:
+ description: Name of EKS Nodegroup.
+ required: True
+ type: str
+ cluster_name:
+ description: Name of EKS Cluster.
+ required: True
+ type: str
+ node_role:
+ description: ARN of IAM role used by the EKS cluster Nodegroup.
+ type: str
+ subnets:
+        description: List of subnet IDs for the Kubernetes cluster.
+ type: list
+ elements: str
+ scaling_config:
+ description: The scaling configuration details for the Auto Scaling group that is created for your node group.
+ type: dict
+ default:
+ min_size: 1
+ max_size: 2
+ desired_size: 1
+ suboptions:
+ min_size:
+ description: The minimum number of nodes that the managed node group can scale in to.
+ type: int
+ max_size:
+ description: The maximum number of nodes that the managed node group can scale out to.
+ type: int
+ desired_size:
+ description: The current number of nodes that the managed node group should maintain.
+ type: int
+ disk_size:
+ description:
+ - Size of disk in nodegroup nodes.
+ If you specify I(launch_template), then don't specify I(disk_size), or the node group deployment will fail.
+ type: int
+ instance_types:
+ description:
+ - Specify the instance types for a node group.
+ If you specify I(launch_template), then don't specify I(instance_types), or the node group deployment will fail.
+ type: list
+ elements: str
+ ami_type:
+ description: The AMI type for your node group.
+ type: str
+ choices:
+ - AL2_x86_64
+ - AL2_x86_64_GPU
+ - AL2_ARM_64
+ - CUSTOM
+ - BOTTLEROCKET_ARM_64
+ - BOTTLEROCKET_x86_64
+ remote_access:
+ description:
+ - The remote access (SSH) configuration to use with your node group.
+ If you specify I(launch_template), then don't specify I(remote_access), or the node group deployment will fail.
+ type: dict
+ suboptions:
+ ec2_ssh_key:
+ description: The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the managed node group.
+ type: str
+ source_sg:
+ description: The security groups that are allowed SSH access (port 22) to the nodes.
+ type: list
+ elements: str
+ update_config:
+ description: The node group update configuration.
+ type: dict
+ default:
+ max_unavailable: 1
+ suboptions:
+ max_unavailable:
+ description: The maximum number of nodes unavailable at once during a version update.
+ type: int
+ max_unavailable_percentage:
+ description: The maximum percentage of nodes unavailable during a version update.
+ type: int
+ labels:
+ description: The Kubernetes labels to be applied to the nodes in the node group when they are created.
+ type: dict
+ default: {}
+ taints:
+ description: The Kubernetes taints to be applied to the nodes in the node group.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ key:
+ description: The key of the taint.
+ type: str
+ value:
+ description: The value of the taint.
+ type: str
+ effect:
+ description: The effect of the taint.
+ type: str
+ choices:
+ - NO_SCHEDULE
+ - NO_EXECUTE
+ - PREFER_NO_SCHEDULE
+ launch_template:
+ description:
+ - An object representing a node group's launch template specification.
+            - If specified, then do not specify I(instance_types), I(disk_size), or I(remote_access).
+ type: dict
+ suboptions:
+ name:
+ description: The name of the launch template.
+ type: str
+ version:
+ description:
+ - The version of the launch template to use.
+ - If no version is specified, then the template's default version is used.
+ type: str
+ id:
+ description: The ID of the launch template.
+ type: str
+ capacity_type:
+ description: The capacity type for your node group.
+ default: ON_DEMAND
+ type: str
+ choices:
+ - ON_DEMAND
+ - SPOT
+ release_version:
+ description: The AMI version of the Amazon EKS optimized AMI to use with your node group.
+ type: str
+ state:
+ description: Create or delete the Nodegroup.
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ tags:
+ description: A dictionary of resource tags.
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+        - Purge existing tags that are not found in I(tags).
+ type: bool
+ default: true
+ wait:
+        description: Specifies whether the module waits until the nodegroup is created or deleted before moving on.
+ type: bool
+ default: false
+ wait_timeout:
+ description: The duration in seconds to wait for the nodegroup to become active. Defaults to C(1200) seconds.
+ default: 1200
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create nodegroup
+ community.aws.eks_nodegroup:
+ name: test_nodegroup
+ state: present
+ cluster_name: test_cluster
+    node_role: arn:aws:iam::1231231123:role/asdf
+ subnets:
+ - subnet-qwerty123
+ - subnet-asdfg456
+ scaling_config:
+      min_size: 1
+      max_size: 2
+      desired_size: 1
+    disk_size: 20
+    instance_types:
+      - 't3.micro'
+    ami_type: 'AL2_x86_64'
+    labels:
+      'test': 'test'
+    taints:
+      - key: 'test'
+        value: 'test'
+        effect: 'NO_SCHEDULE'
+    capacity_type: 'ON_DEMAND'
+
+- name: Remove an EKS Nodegroup
+  community.aws.eks_nodegroup:
+    name: test_nodegroup
+    cluster_name: test_cluster
+    wait: true
+ state: absent
+'''
+
+RETURN = r'''
+nodegroup_name:
+ description: The name associated with an Amazon EKS managed node group.
+ returned: when state is present
+ type: str
+    sample: test_nodegroup
+nodegroup_arn:
+ description: The Amazon Resource Name (ARN) associated with the managed node group.
+ returned: when state is present
+ type: str
+ sample: arn:aws:eks:us-east-1:1231231123:safd
+cluster_name:
+ description: Name of EKS Cluster
+ returned: when state is present
+ type: str
+ sample: test_cluster
+version:
+ description: The Kubernetes version of the managed node group.
+ returned: when state is present
+ type: str
+ sample: need_validate
+release_version:
+ description: This is the version of the Amazon EKS optimized AMI that the node group was deployed with.
+ returned: when state is present
+ type: str
+ sample: need_validate
+created_at:
+ description: Nodegroup creation date and time.
+ returned: when state is present
+ type: str
+ sample: '2022-01-18T20:00:00.111000+00:00'
+modified_at:
+ description: Nodegroup modified date and time.
+ returned: when state is present
+ type: str
+ sample: '2022-01-18T20:00:00.111000+00:00'
+status:
+ description: status of the EKS Nodegroup.
+ returned: when state is present
+ type: str
+ sample:
+ - CREATING
+ - ACTIVE
+capacity_type:
+ description: The capacity type of your managed node group.
+ returned: when state is present
+ type: str
+ sample: need_validate
+scaling_config:
+ description: The scaling configuration details for the Auto Scaling group that is associated with your node group.
+ returned: when state is present
+ type: dict
+ sample: need_validate
+instance_types:
+ description: This is the instance type that is associated with the node group.
+ returned: when state is present
+ type: list
+ sample: need_validate
+subnets:
+    description: List of subnets used in the nodegroup.
+ returned: when state is present
+ type: list
+ sample:
+ - subnet-qwerty123
+ - subnet-asdfg456
+remote_access:
+ description: This is the remote access configuration that is associated with the node group.
+ returned: when state is present
+ type: dict
+ sample: need_validate
+ami_type:
+ description: This is the AMI type that was specified in the node group configuration.
+ returned: when state is present
+ type: str
+ sample: need_validate
+node_role:
+ description: ARN of the IAM Role used by Nodegroup.
+ returned: when state is present
+ type: str
+    sample: arn:aws:iam::1231231123:role/asdf
+labels:
+ description: The Kubernetes labels applied to the nodes in the node group.
+ returned: when state is present
+ type: dict
+ sample: need_validate
+taints:
+ description: The Kubernetes taints to be applied to the nodes in the node group when they are created.
+ returned: when state is present
+ type: list
+ sample: need_validate
+resources:
+ description: The resources associated with the node group.
+ returned: when state is present
+ type: complex
+ contains:
+ autoScalingGroups:
+ description: The Auto Scaling groups associated with the node group.
+ returned: when state is present
+ type: list
+ elements: dict
+ remoteAccessSecurityGroup:
+ description: The remote access security group associated with the node group.
+ returned: when state is present
+ type: str
+diskSize:
+ description: This is the disk size in the node group configuration.
+ returned: when state is present
+ type: int
+ sample: 20
+health:
+ description: The health status of the node group.
+ returned: when state is present
+ type: dict
+ sample: need_validate
+update_config:
+ description: The node group update configuration.
+ returned: when state is present
+ type: dict
+ contains:
+ maxUnavailable:
+ description: The maximum number of nodes unavailable at once during a version update.
+ type: int
+ maxUnavailablePercentage:
+ description: The maximum percentage of nodes unavailable during a version update.
+ type: int
+launch_template:
+ description: If a launch template was used to create the node group, then this is the launch template that was used.
+ returned: when state is present
+ type: dict
+ sample: need_validate
+tags:
+ description: Nodegroup tags.
+ returned: when state is present
+ type: dict
+ sample:
+ foo: bar
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+try:
+ import botocore.exceptions
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def validate_tags(client, module, nodegroup):
+ changed = False
+
+ desired_tags = module.params.get('tags')
+ if desired_tags is None:
+ return False
+
+ try:
+ existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags']
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name'))
+    if tags_to_remove:
+        changed = True
+        if not module.check_mode:
+            try:
+                client.untag_resource(resourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+    if tags_to_add:
+        changed = True
+        if not module.check_mode:
+            try:
+                client.tag_resource(resourceArn=nodegroup['nodegroupArn'], tags=tags_to_add)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+
+ return changed
+
+
+def compare_taints(nodegroup_taints, param_taints):
+ taints_to_unset = []
+ taints_to_add_or_update = []
+ for taint in nodegroup_taints:
+ if taint not in param_taints:
+ taints_to_unset.append(taint)
+ for taint in param_taints:
+ if taint not in nodegroup_taints:
+ taints_to_add_or_update.append(taint)
+
+ return taints_to_add_or_update, taints_to_unset
+
+
+def validate_taints(client, module, nodegroup, param_taints):
+ changed = False
+ params = dict()
+ params['clusterName'] = nodegroup['clusterName']
+ params['nodegroupName'] = nodegroup['nodegroupName']
+    params['taints'] = {}
+ if 'taints' not in nodegroup:
+ nodegroup['taints'] = []
+ taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints)
+
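+    # update_nodegroup_config expects taints as a payload dict of the form
+    # {'addOrUpdateTaints': [...], 'removeTaints': [...]}.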
+ if taints_to_add_or_update:
+ params['taints']['addOrUpdateTaints'] = taints_to_add_or_update
+ if taints_to_unset:
+ params['taints']['removeTaints'] = taints_to_unset
+    if params['taints']:
+        changed = True
+        if not module.check_mode:
+            try:
+                client.update_nodegroup_config(**params)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName'])
+
+ return changed
+
+
+def compare_labels(nodegroup_labels, param_labels):
+ labels_to_unset = []
+ labels_to_add_or_update = {}
+ for label in nodegroup_labels.keys():
+ if label not in param_labels:
+ labels_to_unset.append(label)
+ for key, value in param_labels.items():
+ if key not in nodegroup_labels.keys():
+ labels_to_add_or_update[key] = value
+
+ return labels_to_add_or_update, labels_to_unset
+
+
+def validate_labels(client, module, nodegroup, param_labels):
+ changed = False
+ params = dict()
+ params['clusterName'] = nodegroup['clusterName']
+ params['nodegroupName'] = nodegroup['nodegroupName']
+ params['labels'] = {}
+ labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels)
+
+ if labels_to_add_or_update:
+ params['labels']['addOrUpdateLabels'] = labels_to_add_or_update
+ if labels_to_unset:
+ params['labels']['removeLabels'] = labels_to_unset
+    if params['labels']:
+        changed = True
+        if not module.check_mode:
+            try:
+                client.update_nodegroup_config(**params)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName'])
+
+ return changed
+
+
+def compare_params(module, params, nodegroup):
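+    # These attributes are immutable after nodegroup creation, so a mismatch
+    # is treated as an error rather than an update; only scalingConfig and
+    # updateConfig can be changed in place.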
+    for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiType', 'remoteAccess', 'capacityType']:
+ if (param in nodegroup) and (param in params):
+ if (nodegroup[param] != params[param]):
+ module.fail_json(msg="Cannot modify parameter %s." % param)
+ if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params):
+ module.fail_json(msg="Cannot add Launch Template in this Nodegroup.")
+ if nodegroup['updateConfig'] != params['updateConfig']:
+ return True
+ if nodegroup['scalingConfig'] != params['scalingConfig']:
+ return True
+ return False
+
+
+def compare_params_launch_template(module, params, nodegroup):
+ if 'launchTemplate' not in params:
+ module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.")
+ else:
+ for key in ['name', 'id']:
+ if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]):
+ module.fail_json(msg="Cannot modify Launch Template %s." % key)
+ if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']):
+ return True
+ return False
+
+
+def create_or_update_nodegroups(client, module):
+
+ changed = False
+ params = dict()
+ params['nodegroupName'] = module.params['name']
+ params['clusterName'] = module.params['cluster_name']
+ params['nodeRole'] = module.params['node_role']
+ params['subnets'] = module.params['subnets']
+ params['tags'] = module.params['tags'] or {}
+ if module.params['ami_type'] is not None:
+ params['amiType'] = module.params['ami_type']
+ if module.params['disk_size'] is not None:
+ params['diskSize'] = module.params['disk_size']
+ if module.params['instance_types'] is not None:
+ params['instanceTypes'] = module.params['instance_types']
+ if module.params['launch_template'] is not None:
+ params['launchTemplate'] = dict()
+ if module.params['launch_template']['id'] is not None:
+ params['launchTemplate']['id'] = module.params['launch_template']['id']
+ if module.params['launch_template']['version'] is not None:
+ params['launchTemplate']['version'] = module.params['launch_template']['version']
+ if module.params['launch_template']['name'] is not None:
+ params['launchTemplate']['name'] = module.params['launch_template']['name']
+ if module.params['release_version'] is not None:
+ params['releaseVersion'] = module.params['release_version']
+ if module.params['remote_access'] is not None:
+ params['remoteAccess'] = dict()
+ if module.params['remote_access']['ec2_ssh_key'] is not None:
+ params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key']
+ if module.params['remote_access']['source_sg'] is not None:
+ params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg']
+ if module.params['capacity_type'] is not None:
+ params['capacityType'] = module.params['capacity_type'].upper()
+ if module.params['labels'] is not None:
+ params['labels'] = module.params['labels']
+ if module.params['taints'] is not None:
+ params['taints'] = module.params['taints']
+ if module.params['update_config'] is not None:
+ params['updateConfig'] = dict()
+ if module.params['update_config']['max_unavailable'] is not None:
+ params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable']
+ if module.params['update_config']['max_unavailable_percentage'] is not None:
+ params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage']
+ if module.params['scaling_config'] is not None:
+ params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config'])
+
+ wait = module.params.get('wait')
+ nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+
+ if nodegroup:
+ update_params = dict()
+ update_params['clusterName'] = params['clusterName']
+ update_params['nodegroupName'] = params['nodegroupName']
+
+ if 'launchTemplate' in nodegroup:
+ if compare_params_launch_template(module, params, nodegroup):
+ update_params['launchTemplate'] = params['launchTemplate']
+ if not module.check_mode:
+ try:
+ client.update_nodegroup_version(**update_params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't update nodegroup.")
+ changed |= True
+
+ if compare_params(module, params, nodegroup):
+ try:
+ if 'launchTemplate' in update_params:
+ update_params.pop('launchTemplate')
+ update_params['scalingConfig'] = params['scalingConfig']
+ update_params['updateConfig'] = params['updateConfig']
+
+ if not module.check_mode:
+ client.update_nodegroup_config(**update_params)
+
+ changed |= True
+
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't update nodegroup.")
+
+ changed |= validate_tags(client, module, nodegroup)
+
+ changed |= validate_labels(client, module, nodegroup, params['labels'])
+
+ if 'taints' in nodegroup:
+ changed |= validate_taints(client, module, nodegroup, params['taints'])
+
+ if wait:
+ wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName'])
+
+ nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ nodegroup = client.create_nodegroup(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName'])
+
+ if wait:
+ wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName'])
+ nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup))
+
+
+def delete_nodegroups(client, module):
+ name = module.params.get('name')
+ clusterName = module.params['cluster_name']
+ existing = get_nodegroup(client, module, name, clusterName)
+ wait = module.params.get('wait')
+ if not existing or existing['status'] == 'DELETING':
+        module.exit_json(changed=False, msg='Nodegroup does not exist or is already being deleted.')
+ if not module.check_mode:
+ try:
+ client.delete_nodegroup(clusterName=clusterName, nodegroupName=name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name)
+
+ if wait:
+ wait_until(client, module, 'nodegroup_deleted', name, clusterName)
+
+ module.exit_json(changed=True)
+
+
+def get_nodegroup(client, module, nodegroup_name, cluster_name):
+ try:
+ return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup']
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." % nodegroup_name)
+
+
+def wait_until(client, module, waiter_name, nodegroup_name, cluster_name):
+ wait_timeout = module.params.get('wait_timeout')
+ waiter = get_waiter(client, waiter_name)
+ attempts = 1 + int(wait_timeout / waiter.config.delay)
+ try:
+ waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts})
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="An error occurred waiting")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ cluster_name=dict(type='str', required=True),
+ node_role=dict(),
+ subnets=dict(type='list', elements='str'),
+ scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict(
+ min_size=dict(type='int'),
+ max_size=dict(type='int'),
+ desired_size=dict(type='int')
+ )),
+ disk_size=dict(type='int'),
+ instance_types=dict(type='list', elements='str'),
+ ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']),
+ remote_access=dict(type='dict', options=dict(
+ ec2_ssh_key=dict(no_log=True),
+ source_sg=dict(type='list', elements='str')
+ )),
+ update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict(
+ max_unavailable=dict(type='int'),
+ max_unavailable_percentage=dict(type='int')
+ )),
+ labels=dict(type='dict', default={}),
+ taints=dict(type='list', elements='dict', default=[], options=dict(
+ key=dict(type='str', no_log=False,),
+ value=dict(type='str'),
+ effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE'])
+ )),
+ launch_template=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ version=dict(type='str'),
+ id=dict(type='str')
+ )),
+ capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'),
+ release_version=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=1200, type='int')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['node_role', 'subnets']]],
+ mutually_exclusive=[
+ ('launch_template', 'instance_types'),
+ ('launch_template', 'disk_size'),
+ ('launch_template', 'remote_access'),
+ ('launch_template', 'ami_type')
+ ],
+ supports_check_mode=True,
+ )
+
+ if module.params['launch_template'] is None:
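+        # Apply defaults only when no launch template is used; passing
+        # disk_size/ami_type/instance_types alongside a launch template makes
+        # the EKS API reject the request.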
+ if module.params['disk_size'] is None:
+ module.params['disk_size'] = 20
+ if module.params['ami_type'] is None:
+ module.params['ami_type'] = "AL2_x86_64"
+ if module.params['instance_types'] is None:
+ module.params['instance_types'] = ["t3.medium"]
+ else:
+ if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None):
+            module.fail_json(msg='To use launch_template, either its id or name must be specified.')
+ try:
+ client = module.client('eks')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't connect to AWS.")
+
+ if module.params.get('state') == 'present':
+ create_or_update_nodegroups(client, module)
+ else:
+ delete_nodegroups(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache.py b/ansible_collections/community/aws/plugins/modules/elasticache.py
new file mode 100644
index 000000000..454baafe3
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticache.py
@@ -0,0 +1,549 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elasticache
+version_added: 1.0.0
+short_description: Manage cache clusters in Amazon ElastiCache
+description:
+ - Manage cache clusters in Amazon ElastiCache.
+ - Returns information about the specified cache cluster.
+author: "Jim Dalton (@jsdalton)"
+options:
+ state:
+ description:
+ - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed.
+ - C(rebooted) will reboot the cluster, resulting in a momentary outage.
+ choices: ['present', 'absent', 'rebooted']
+ required: true
+ type: str
+ name:
+ description:
+ - The cache cluster identifier.
+ required: true
+ type: str
+ engine:
+ description:
+ - Name of the cache engine to be used.
+ - Supported values are C(redis) and C(memcached).
+ default: memcached
+ type: str
+ cache_engine_version:
+ description:
+ - The version number of the cache engine.
+ type: str
+ default: ''
+ node_type:
+ description:
+ - The compute and memory capacity of the nodes in the cache cluster.
+ default: cache.t2.small
+ type: str
+ num_nodes:
+ description:
+ - The initial number of cache nodes that the cache cluster will have.
+ - Required when I(state=present).
+ type: int
+ default: 1
+ cache_port:
+ description:
+ - The port number on which each of the cache nodes will accept
+ connections.
+ type: int
+ cache_parameter_group:
+ description:
+ - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
+ for the specified engine will be used.
+ aliases: [ 'parameter_group' ]
+ type: str
+ default: ''
+ cache_subnet_group:
+ description:
+ - The subnet group name to associate with. Only use if inside a VPC.
+ - Required if inside a VPC.
+ type: str
+ default: ''
+ security_group_ids:
+ description:
+ - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC.
+ type: list
+ elements: str
+ default: []
+ cache_security_groups:
+ description:
+ - A list of cache security group names to associate with this cache cluster.
+ - Don't use if your cache is inside a VPC; in that case use I(security_group_ids) instead.
+ type: list
+ elements: str
+ default: []
+ zone:
+ description:
+ - The EC2 Availability Zone in which the cache cluster will be created.
+ type: str
+ wait:
+ description:
+ - Wait for cache cluster result before returning.
+ type: bool
+ default: true
+ hard_modify:
+ description:
+ - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state.
+ - Defaults to C(false).
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r"""
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: Basic example
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: present
+ engine: memcached
+ cache_engine_version: 1.4.14
+ node_type: cache.m1.small
+ num_nodes: 1
+ cache_port: 11211
+ cache_security_groups:
+ - default
+ zone: us-east-1d
+
+
+- name: Ensure cache cluster is gone
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: absent
+
+- name: Reboot cache cluster
+ community.aws.elasticache:
+ name: "test-please-delete"
+ state: rebooted
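+
+# A sketch of a cluster inside a VPC, assuming the subnet group and the
+# security group ID below already exist; both values are placeholders.
+- name: Create a Redis cluster inside a VPC
+ community.aws.elasticache:
+ name: "test-redis-vpc"
+ state: present
+ engine: redis
+ node_type: cache.m1.small
+ num_nodes: 1
+ cache_subnet_group: "test-subnet-group"
+ security_group_ids:
+ - sg-0123456789abcdef0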
+
+"""
+from time import sleep
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+class ElastiCacheManager(object):
+
+ """Handles elasticache creation and destruction"""
+
+ EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
+
+ def __init__(self, module, name, engine, cache_engine_version, node_type,
+ num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
+ cache_security_groups, security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs):
+ self.module = module
+ self.name = name
+ self.engine = engine.lower()
+ self.cache_engine_version = cache_engine_version
+ self.node_type = node_type
+ self.num_nodes = num_nodes
+ self.cache_port = cache_port
+ self.cache_parameter_group = cache_parameter_group
+ self.cache_subnet_group = cache_subnet_group
+ self.cache_security_groups = cache_security_groups
+ self.security_group_ids = security_group_ids
+ self.zone = zone
+ self.wait = wait
+ self.hard_modify = hard_modify
+
+ self.region = region
+ self.aws_connect_kwargs = aws_connect_kwargs
+
+ self.changed = False
+ self.data = None
+ self.status = 'gone'
+ self.conn = self._get_elasticache_connection()
+ self._refresh_data()
+
+ def ensure_present(self):
+ """Ensure cache cluster exists or create it if not"""
+ if self.exists():
+ self.sync()
+ else:
+ self.create()
+
+ def ensure_absent(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.delete()
+
+ def ensure_rebooted(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.reboot()
+
+ def exists(self):
+ """Check if cache cluster exists"""
+ return self.status in self.EXIST_STATUSES
+
+ def create(self):
+ """Create an ElastiCache cluster"""
+ if self.status == 'available':
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ else:
+ msg = "'%s' is currently deleting. Cannot create."
+ self.module.fail_json(msg=msg % self.name)
+
+ kwargs = dict(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeType=self.node_type,
+ Engine=self.engine,
+ EngineVersion=self.cache_engine_version,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ SecurityGroupIds=self.security_group_ids,
+ CacheParameterGroupName=self.cache_parameter_group,
+ CacheSubnetGroupName=self.cache_subnet_group)
+ if self.cache_port is not None:
+ kwargs['Port'] = self.cache_port
+ if self.zone is not None:
+ kwargs['PreferredAvailabilityZone'] = self.zone
+
+ try:
+ self.conn.create_cache_cluster(**kwargs)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+ return True
+
+ def delete(self):
+ """Destroy an ElastiCache cluster"""
+ if self.status == 'gone':
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot delete."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ try:
+ response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Failed to delete cache cluster")
+
+ cache_cluster_data = response['CacheCluster']
+ self._refresh_data(cache_cluster_data)
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('gone')
+
+ def sync(self):
+ """Sync settings to cluster if required"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot sync."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ # Cluster can only be synced if available. If we can't wait
+ # for this, then just be done.
+ return
+
+ if self._requires_destroy_and_create():
+ if not self.hard_modify:
+ msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+ if not self.wait:
+ msg = "'%s' requires destructive modification. 'wait' must be set to true."
+ self.module.fail_json(msg=msg % self.name)
+ self.delete()
+ self.create()
+ return
+
+ if self._requires_modification():
+ self.modify()
+
+ def modify(self):
+ """Modify the cache cluster. Note it's only possible to modify a few select options."""
+ nodes_to_remove = self._get_nodes_to_remove()
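+ # ApplyImmediately=True pushes the change now instead of waiting for
+ # the next maintenance window.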
+ try:
+ self.conn.modify_cache_cluster(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeIdsToRemove=nodes_to_remove,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ CacheParameterGroupName=self.cache_parameter_group,
+ SecurityGroupIds=self.security_group_ids,
+ ApplyImmediately=True,
+ EngineVersion=self.cache_engine_version)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to modify cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def reboot(self):
+ """Reboot the cache cluster"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+ if self.status == 'rebooting':
+ return
+ if self.status in ['creating', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ # Collect ALL nodes for reboot
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ try:
+ self.conn.reboot_cache_cluster(CacheClusterId=self.name,
+ CacheNodeIdsToReboot=cache_node_ids)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Failed to reboot cache cluster")
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def get_info(self):
+ """Return basic info about the cache cluster"""
+ info = {
+ 'name': self.name,
+ 'status': self.status
+ }
+ if self.data:
+ info['data'] = self.data
+ return info
+
+ def _wait_for_status(self, awaited_status):
+ """Wait for status to change from present status to awaited_status"""
+ status_map = {
+ 'creating': 'available',
+ 'rebooting': 'available',
+ 'modifying': 'available',
+ 'deleting': 'gone'
+ }
+ if self.status == awaited_status:
+ # No need to wait, we're already done
+ return
+ if status_map[self.status] != awaited_status:
+ msg = "Invalid awaited status. '%s' cannot transition to '%s'"
+ self.module.fail_json(msg=msg % (self.status, awaited_status))
+
+ if awaited_status not in set(status_map.values()):
+ msg = "'%s' is not a valid awaited status."
+ self.module.fail_json(msg=msg % awaited_status)
+
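+ # Poll once per second until the cluster reports the awaited status.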
+ while True:
+ sleep(1)
+ self._refresh_data()
+ if self.status == awaited_status:
+ break
+
+ def _requires_modification(self):
+ """Check if cluster requires (nondestructive) modification"""
+ # Check modifiable data attributes
+ modifiable_data = {
+ 'NumCacheNodes': self.num_nodes,
+ 'EngineVersion': self.cache_engine_version
+ }
+ for key, value in modifiable_data.items():
+ if value and self.data[key] != value:
+ return True
+
+ # Check cache security groups
+ cache_security_groups = []
+ for sg in self.data['CacheSecurityGroups']:
+ cache_security_groups.append(sg['CacheSecurityGroupName'])
+ if set(cache_security_groups) != set(self.cache_security_groups):
+ return True
+
+ # check vpc security groups
+ if self.security_group_ids:
+ vpc_security_groups = []
+ security_groups = self.data.get('SecurityGroups', [])
+ for sg in security_groups:
+ vpc_security_groups.append(sg['SecurityGroupId'])
+ if set(vpc_security_groups) != set(self.security_group_ids):
+ return True
+
+ return False
+
+ def _requires_destroy_and_create(self):
+ """
+ Check whether a destroy and create is required to synchronize cluster.
+ """
+ unmodifiable_data = {
+ 'node_type': self.data['CacheNodeType'],
+ 'engine': self.data['Engine'],
+ 'cache_port': self._get_port()
+ }
+ # Only check for modifications if zone is specified
+ if self.zone is not None:
+ unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
+ for key, value in unmodifiable_data.items():
+ if getattr(self, key) is not None and getattr(self, key) != value:
+ return True
+ return False
+
+ def _get_elasticache_connection(self):
+ """Get an elasticache connection"""
+ try:
+ return self.module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ def _get_port(self):
+ """Get the port. Where this information is retrieved from is engine dependent."""
+ if self.data['Engine'] == 'memcached':
+ return self.data['ConfigurationEndpoint']['Port']
+ elif self.data['Engine'] == 'redis':
+ # Redis only supports a single node (presently) so just use
+ # the first and only
+ return self.data['CacheNodes'][0]['Endpoint']['Port']
+
+ def _refresh_data(self, cache_cluster_data=None):
+ """Refresh data about this cache cluster"""
+
+ if cache_cluster_data is None:
+ try:
+ response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
+ except is_boto3_error_code('CacheClusterNotFound'):
+ self.data = None
+ self.status = 'gone'
+ return
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Failed to describe cache clusters")
+ cache_cluster_data = response['CacheClusters'][0]
+ self.data = cache_cluster_data
+ self.status = self.data['CacheClusterStatus']
+
+ # The documentation for elasticache lies -- status on rebooting is set
+ # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
+ # here to make status checks etc. more sane.
+ if self.status == 'rebooting cache cluster nodes':
+ self.status = 'rebooting'
+
+ def _get_nodes_to_remove(self):
+ """If there are nodes to remove, it figures out which need to be removed"""
+ num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
+ if num_nodes_to_remove <= 0:
+ return []
+
+ if not self.hard_modify:
+ msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ return cache_node_ids[-num_nodes_to_remove:]
+
+
+def main():
+ """ elasticache ansible module """
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent', 'rebooted']),
+ name=dict(required=True),
+ engine=dict(default='memcached'),
+ cache_engine_version=dict(default=""),
+ node_type=dict(default='cache.t2.small'),
+ num_nodes=dict(default=1, type='int'),
+ # alias for compat with the original PR 1950
+ cache_parameter_group=dict(default="", aliases=['parameter_group']),
+ cache_port=dict(type='int'),
+ cache_subnet_group=dict(default=""),
+ cache_security_groups=dict(default=[], type='list', elements='str'),
+ security_group_ids=dict(default=[], type='list', elements='str'),
+ zone=dict(),
+ wait=dict(default=True, type='bool'),
+ hard_modify=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ name = module.params['name']
+ state = module.params['state']
+ engine = module.params['engine']
+ cache_engine_version = module.params['cache_engine_version']
+ node_type = module.params['node_type']
+ num_nodes = module.params['num_nodes']
+ cache_port = module.params['cache_port']
+ cache_subnet_group = module.params['cache_subnet_group']
+ cache_security_groups = module.params['cache_security_groups']
+ security_group_ids = module.params['security_group_ids']
+ zone = module.params['zone']
+ wait = module.params['wait']
+ hard_modify = module.params['hard_modify']
+ cache_parameter_group = module.params['cache_parameter_group']
+
+ if cache_subnet_group and cache_security_groups:
+ module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
+
+ if state == 'present' and not num_nodes:
+ module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
+
+ elasticache_manager = ElastiCacheManager(module, name, engine,
+ cache_engine_version, node_type,
+ num_nodes, cache_port,
+ cache_parameter_group,
+ cache_subnet_group,
+ cache_security_groups,
+ security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs)
+
+ if state == 'present':
+ elasticache_manager.ensure_present()
+ elif state == 'absent':
+ elasticache_manager.ensure_absent()
+ elif state == 'rebooted':
+ elasticache_manager.ensure_rebooted()
+
+ facts_result = dict(changed=elasticache_manager.changed,
+ elasticache=elasticache_manager.get_info())
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_info.py b/ansible_collections/community/aws/plugins/modules/elasticache_info.py
new file mode 100644
index 000000000..f6c34629e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_info.py
@@ -0,0 +1,509 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: elasticache_info
+short_description: Retrieve information for AWS ElastiCache clusters
+version_added: 1.0.0
+description:
+ - Retrieve information from AWS ElastiCache clusters.
+options:
+ name:
+ description:
+ - The name of an ElastiCache cluster.
+ type: str
+author:
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: obtain all ElastiCache information
+ community.aws.elasticache_info:
+
+- name: obtain all information for a single ElastiCache cluster
+ community.aws.elasticache_info:
+ name: test_elasticache
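+
+# A sketch of consuming the returned facts; the registered variable name is
+# arbitrary.
+- name: obtain all ElastiCache information and report each cluster's status
+ community.aws.elasticache_info:
+ register: elasticache_facts
+
+- name: show the status of each cluster
+ ansible.builtin.debug:
+ msg: "{{ item.cache_cluster_id }} is {{ item.cache_cluster_status }}"
+ loop: "{{ elasticache_facts.elasticache_clusters }}"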
+'''
+
+RETURN = '''
+elasticache_clusters:
+ description: List of ElastiCache clusters.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ arn:
+ description: ARN of the cache cluster.
+ returned: always
+ type: str
+ sample: 'arn:aws:elasticache:us-east-1:123456789012:cluster:ansible-test'
+ auto_minor_version_upgrade:
+ description: Whether to automatically upgrade to minor versions.
+ returned: always
+ type: bool
+ sample: true
+ cache_cluster_create_time:
+ description: Date and time cluster was created.
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_cluster_id:
+ description: ID of the cache cluster.
+ returned: always
+ type: str
+ sample: abcd-1234-001
+ cache_cluster_status:
+ description: Status of ElastiCache cluster.
+ returned: always
+ type: str
+ sample: available
+ cache_node_type:
+ description: Instance type of ElastiCache nodes.
+ returned: always
+ type: str
+ sample: cache.t2.micro
+ cache_nodes:
+ description: List of ElastiCache nodes in the cluster.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cache_node_create_time:
+ description: Date and time node was created.
+ returned: always
+ type: str
+ sample: '2017-09-15T05:43:46.038000+00:00'
+ cache_node_id:
+ description: ID of the cache node.
+ returned: always
+ type: str
+ sample: '0001'
+ cache_node_status:
+ description: Status of the cache node.
+ returned: always
+ type: str
+ sample: available
+ customer_availability_zone:
+ description: Availability Zone in which the cache node was created.
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ endpoint:
+ description: Connection details for the cache node.
+ returned: always
+ type: dict
+ contains:
+ address:
+ description: URL of the cache node endpoint.
+ returned: always
+ type: str
+ sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
+ port:
+ description: Port of the cache node endpoint.
+ returned: always
+ type: int
+ sample: 6379
+ parameter_group_status:
+ description: Status of the Cache Parameter Group.
+ returned: always
+ type: str
+ sample: in-sync
+ cache_parameter_group:
+ description: Contents of the Cache Parameter Group.
+ returned: always
+ type: dict
+ contains:
+ cache_node_ids_to_reboot:
+ description: Cache nodes which need to be rebooted for parameter changes to be applied.
+ returned: always
+ type: list
+ elements: str
+ sample: []
+ cache_parameter_group_name:
+ description: Name of the cache parameter group.
+ returned: always
+ type: str
+ sample: default.redis3.2
+ parameter_apply_status:
+ description: Status of parameter updates.
+ returned: always
+ type: str
+ sample: in-sync
+ cache_security_groups:
+ description: Security Groups used by the cache.
+ returned: always
+ type: list
+ elements: str
+ sample:
+ - 'sg-abcd1234'
+ cache_subnet_group_name:
+ description: ElastiCache Subnet Group used by the cache.
+ returned: always
+ type: str
+ sample: abcd-subnet-group
+ client_download_landing_page:
+ description: URL of client download web page.
+ returned: always
+ type: str
+ sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
+ engine:
+ description: Engine used by ElastiCache.
+ returned: always
+ type: str
+ sample: redis
+ engine_version:
+ description: Version of ElastiCache engine.
+ returned: always
+ type: str
+ sample: 3.2.4
+ notification_configuration:
+ description: Configuration of notifications.
+ returned: if notifications are enabled
+ type: dict
+ contains:
+ topic_arn:
+ description: ARN of notification destination topic.
+ returned: if notifications are enabled
+ type: str
+ sample: arn:aws:sns:*:123456789012:my_topic
+ topic_name:
+ description: Name of notification destination topic.
+ returned: if notifications are enabled
+ type: str
+ sample: MyTopic
+ num_cache_nodes:
+ description: Number of Cache Nodes.
+ returned: always
+ type: int
+ sample: 1
+ pending_modified_values:
+ description: Values that are pending modification.
+ returned: always
+ type: dict
+ preferred_availability_zone:
+ description: Preferred Availability Zone.
+ returned: always
+ type: str
+ sample: ap-southeast-2b
+ preferred_maintenance_window:
+ description: Time slot for preferred maintenance window.
+ returned: always
+ type: str
+ sample: sat:12:00-sat:13:00
+ replication_group:
+ description: Information about the associated replication group.
+ version_added: 4.1.0
+ returned: if replication is enabled
+ type: dict
+ contains:
+ arn:
+ description: The ARN (Amazon Resource Name) of the replication group.
+ returned: always
+ type: str
+ at_rest_encryption_enabled:
+ description: A flag that enables encryption at-rest when set to true.
+ returned: always
+ type: bool
+ auth_token_enabled:
+ description: A flag that enables using an AuthToken (password) when issuing Redis commands.
+ returned: always
+ type: bool
+ automatic_failover:
+ description: Indicates the status of automatic failover for this Redis replication group.
+ returned: always
+ type: str
+ sample: enabled
+ cache_node_type:
+ description: The name of the compute and memory capacity node type for each node in the replication group.
+ returned: always
+ type: str
+ sample: cache.t3.medium
+ cluster_enabled:
+ description: A flag indicating whether or not this replication group is cluster enabled.
+ returned: always
+ type: bool
+ description:
+ description: The user supplied description of the replication group.
+ returned: always
+ type: str
+ global_replication_group_info:
+ description: The name of the Global datastore and role of this replication group in the Global datastore.
+ returned: always
+ type: dict
+ contains:
+ global_replication_group_id:
+ description: The name of the Global datastore.
+ returned: always
+ type: str
+ global_replication_group_member_role:
+ description: The role of the replication group in a Global datastore. Can be primary or secondary.
+ returned: always
+ type: str
+ kms_key_id:
+ description: The ID of the KMS key used to encrypt the disk in the cluster.
+ returned: always
+ type: str
+ member_clusters:
+ description: The names of all the cache clusters that are part of this replication group.
+ returned: always
+ type: list
+ elements: str
+ multi_az:
+ description: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance.
+ returned: always
+ type: str
+ sample: enabled
+ node_groups:
+ description: A list of node groups in this replication group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ node_group_id:
+ description: The identifier for the node group (shard).
+ returned: always
+ type: str
+ node_group_members:
+ description: A list containing information about individual nodes within the node group (shard).
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cache_cluster_id:
+ description: The ID of the cluster to which the node belongs.
+ returned: always
+ type: str
+ cache_node_id:
+ description: The ID of the node within its cluster.
+ returned: always
+ type: str
+ current_role:
+ description: The role that is currently assigned to the node - primary or replica.
+ returned: always
+ type: str
+ sample: primary
+ preferred_availability_zone:
+ description: The name of the Availability Zone in which the node is located.
+ returned: always
+ type: str
+ read_endpoint:
+ description: The information required for client programs to connect to a node for read operations.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ address:
+ description: The DNS hostname of the cache node.
+ returned: always
+ type: str
+ port:
+ description: The port number that the cache engine is listening on.
+ returned: always
+ type: int
+ sample: 6379
+ primary_endpoint:
+ description: The endpoint of the primary node in this node group (shard).
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ address:
+ description: The DNS hostname of the cache node.
+ returned: always
+ type: str
+ port:
+ description: The port number that the cache engine is listening on.
+ returned: always
+ type: int
+ sample: 6379
+ reader_endpoint:
+ description: The endpoint of the cache node.
+ returned: always
+ type: dict
+ contains:
+ address:
+ description: The DNS hostname of the cache node.
+ returned: always
+ type: str
+ port:
+ description: The port number that the cache engine is listening on.
+ returned: always
+ type: int
+ sample: 6379
+ status:
+ description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting).
+ returned: always
+ type: str
+ sample: available
+ pending_modified_values:
+ description: A group of settings to be applied to the replication group, either immediately or during the next maintenance window.
+ returned: always
+ type: dict
+ replication_group_id:
+ description: Replication Group Id.
+ returned: always
+ type: str
+ sample: replication-001
+ snapshot_retention_limit:
+ description: The number of days for which ElastiCache retains automatic cluster snapshots before deleting them.
+ returned: always
+ type: int
+ snapshot_window:
+ description: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
+ returned: always
+ type: str
+ sample: 07:00-09:00
+ snapshotting_cluster_id:
+ description: The cluster ID that is used as the daily snapshot source for the replication group.
+ returned: always
+ type: str
+ status:
+ description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting), C(create-failed), C(snapshotting).
+ returned: always
+ type: str
+ transit_encryption_enabled:
+ description: A flag that enables in-transit encryption when set to C(true).
+ returned: always
+ type: bool
+ replication_group_id:
+ description: Replication Group Id.
+ returned: if replication is enabled
+ type: str
+ sample: replication-001
+ security_groups:
+ description: List of Security Groups associated with ElastiCache.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ security_group_id:
+ description: Security Group ID.
+ returned: always
+ type: str
+ sample: sg-abcd1234
+ status:
+ description: Status of Security Group.
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the ElastiCache cluster.
+ returned: always
+ type: dict
+ sample:
+ Application: web
+ Environment: test
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def describe_cache_clusters_with_backoff(client, cluster_id=None):
+ paginator = client.get_paginator('describe_cache_clusters')
+ params = dict(ShowCacheNodeInfo=True)
+ if cluster_id:
+ params['CacheClusterId'] = cluster_id
+ try:
+ response = paginator.paginate(**params).build_full_result()
+ except is_boto3_error_code('CacheClusterNotFound'):
+ return []
+ return response['CacheClusters']
+
+
+@AWSRetry.exponential_backoff()
+def describe_replication_group_with_backoff(client, replication_group_id):
+ try:
+ response = client.describe_replication_groups(ReplicationGroupId=replication_group_id)
+ except is_boto3_error_code('ReplicationGroupNotFoundFault'):
+ return None
+
+ return response['ReplicationGroups'][0]
+
+
+@AWSRetry.exponential_backoff()
+def get_elasticache_tags_with_backoff(client, cluster_id):
+ return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
+
+
+def get_aws_account_id(module):
+ try:
+ client = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Can't authorize connection")
+
+ try:
+ return client.get_caller_identity()['Account']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+
+
+def get_elasticache_clusters(client, module):
+ region = module.region
+ try:
+ clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
+
+ account_id = get_aws_account_id(module)
+ results = []
+ for cluster in clusters:
+
+ cluster = camel_dict_to_snake_dict(cluster)
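+ # The tagging API needs the full ARN, so build it from the region,
+ # account id and cluster id.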
+ arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+ try:
+ tags = get_elasticache_tags_with_backoff(client, arn)
+ except is_boto3_error_code("CacheClusterNotFound"):
+ # e.g: Cluster was listed but is in deleting state
+ continue
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
+
+ cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+
+ if cluster.get('replication_group_id', None):
+ try:
+ replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain replication group info")
+
+ if replication_group is not None:
+ replication_group = camel_dict_to_snake_dict(replication_group)
+ cluster['replication_group'] = replication_group
+
+ results.append(cluster)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ client = module.client('elasticache')
+
+ module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
new file mode 100644
index 000000000..247dd0bab
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_parameter_group
+version_added: 1.0.0
+short_description: Manage cache parameter groups in Amazon ElastiCache
+description:
+ - Manage cache parameter groups in Amazon ElastiCache.
+ - Returns information about the specified cache cluster.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+options:
+ group_family:
+ description:
+ - The name of the cache parameter group family that the cache parameter group can be used with.
+ Required when creating a cache parameter group.
+ choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']
+ type: str
+ name:
+ description:
+ - A user-specified name for the cache parameter group.
+ required: true
+ type: str
+ description:
+ description:
+ - A user-specified description for the cache parameter group.
+ type: str
+ default: ''
+ state:
+ description:
+ - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed.
+ choices: ['present', 'absent', 'reset']
+ required: true
+ type: str
+ values:
+ description:
+ - A user-specified dictionary of parameters to reset or modify for the cache parameter group.
+ type: dict
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: 'Create a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ group_family: 'redis3.2'
+ description: 'This is a cache parameter group'
+ state: 'present'
+ - name: 'Modify a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ values:
+ activerehashing: yes
+ client-output-buffer-limit-normal-hard-limit: 4
+ state: 'present'
+ - name: 'Reset all modifiable parameters for the test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ state: reset
+ - name: 'Delete a test parameter group'
+ community.aws.elasticache_parameter_group:
+ name: 'test-param-group'
+ state: 'absent'
+"""
+
+RETURN = """
+elasticache:
+ description: cache parameter group information and response metadata
+ returned: always
+ type: dict
+ sample:
+ cache_parameter_group:
+ cache_parameter_group_family: redis3.2
+ cache_parameter_group_name: test-please-delete
+ description: "initial description"
+ response_metadata:
+ http_headers:
+ content-length: "562"
+ content-type: text/xml
+ date: "Mon, 06 Feb 2017 22:14:08 GMT"
+ x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
+ http_status_code: 200
+ request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
+ retry_attempts: 0
+changed:
+ description: if the cache parameter group has changed
+ returned: always
+ type: bool
+ sample:
+ changed: true
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def create(module, conn, name, group_family, description):
+ """ Create ElastiCache parameter group. """
+ try:
+ response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create cache parameter group.")
+ return response, changed
+
+
+def delete(module, conn, name):
+ """ Delete ElastiCache parameter group. """
+ try:
+ conn.delete_cache_parameter_group(CacheParameterGroupName=name)
+ response = {}
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete cache parameter group.")
+ return response, changed
+
+
+def make_current_modifiable_param_dict(module, conn, name):
+ """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
+ current_info = get_info(conn, name)
+ if current_info is False:
+ module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
+
+ parameters = current_info["Parameters"]
+ modifiable_params = {}
+
+ for param in parameters:
+ if param["IsModifiable"]:
+ modifiable_params[param["ParameterName"]] = [param.get("AllowedValues")]
+ modifiable_params[param["ParameterName"]].append(param["DataType"])
+ modifiable_params[param["ParameterName"]].append(param.get("ParameterValue"))
+ return modifiable_params
+
+
+def check_valid_modification(module, values, modifiable_params):
+ """ Check if the parameters and values in values are valid. """
+ changed_with_update = False
+
+ for parameter in values:
+ new_value = values[parameter]
+
+ # check valid modifiable parameters
+ if parameter not in modifiable_params:
+ module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
+
+ # check allowed datatype for modified parameters
+ str_to_type = {"integer": int, "string": string_types}
+ expected_type = str_to_type[modifiable_params[parameter][1]]
+ if not isinstance(new_value, expected_type):
+ if expected_type == string_types:
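+ # Booleans are mapped to the "yes"/"no" strings that ElastiCache
+ # string parameters expect.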
+ if isinstance(new_value, bool):
+ values[parameter] = "yes" if new_value else "no"
+ else:
+ values[parameter] = to_text(new_value)
+ elif expected_type == int:
+ if isinstance(new_value, bool):
+ values[parameter] = 1 if new_value else 0
+ else:
+ module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
+ (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+ else:
+ module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
+ (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+
+ # check allowed values for modifiable parameters
+ choices = modifiable_params[parameter][0]
+ if choices:
+ if not (to_text(new_value) in choices or isinstance(new_value, int)):
+ module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
+ (new_value, parameter, choices))
+
+ # check if a new value is different from current value
+ if to_text(values[parameter]) != modifiable_params[parameter][2]:
+ changed_with_update = True
+
+ return changed_with_update, values
+
+
+def check_changed_parameter_values(values, old_parameters, new_parameters):
+ """ Checking if the new values are different than the old values. """
+ changed_with_update = False
+
+ # if the user specified parameters to reset, only check those for change
+ if values:
+ for parameter in values:
+ if old_parameters[parameter] != new_parameters[parameter]:
+ changed_with_update = True
+ break
+ # otherwise check all to find a change
+ else:
+ for parameter in old_parameters:
+ if old_parameters[parameter] != new_parameters[parameter]:
+ changed_with_update = True
+ break
+
+ return changed_with_update
+
+
+def modify(module, conn, name, values):
+ """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
+ # Compares the current group parameters with the user-specified values to see whether this call will change the group.
+ format_parameters = []
+ for key in values:
+ value = to_text(values[key])
+ format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ try:
+ response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to modify cache parameter group.")
+ return response
+
+
+def reset(module, conn, name, values):
+ """ Reset ElastiCache parameter group if the current information is different from the new information. """
+ # used to compare with the reset parameters' dict to see if there have been changes
+ old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
+
+ format_parameters = []
+
+ # determine whether to reset all or specific parameters
+ if values:
+ all_parameters = False
+ format_parameters = []
+ for key in values:
+ value = to_text(values[key])
+ format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ else:
+ all_parameters = True
+
+ try:
+ response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to reset cache parameter group.")
+
+ # determine changed
+ new_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
+ changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict)
+
+ return response, changed
+
+
+def get_info(conn, name):
+ """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
+ try:
+ data = conn.describe_cache_parameters(CacheParameterGroupName=name)
+ return data
+ except botocore.exceptions.ClientError:
+ return False
+
+
+def main():
+ argument_spec = dict(
+ group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']),
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ state=dict(required=True, choices=['present', 'absent', 'reset']),
+ values=dict(type='dict'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ parameter_group_family = module.params.get('group_family')
+ parameter_group_name = module.params.get('name')
+ group_description = module.params.get('description')
+ state = module.params.get('state')
+ values = module.params.get('values')
+
+ try:
+ connection = module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ exists = get_info(connection, parameter_group_name)
+
+ # check that the needed requirements are available
+ if state == 'present' and not (exists or parameter_group_family):
+ module.fail_json(msg="Creating a group requires a family group.")
+ elif state == 'reset' and not exists:
+ module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
+
+ # Taking action
+ changed = False
+ if state == 'present':
+ if exists:
+ # confirm that the group exists without any actions
+ if not values:
+ response = exists
+ changed = False
+ # modify existing group
+ else:
+ modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
+ changed, values = check_valid_modification(module, values, modifiable_params)
+ response = modify(module, connection, parameter_group_name, values)
+ # create group
+ else:
+ response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
+ if values:
+ modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
+ changed, values = check_valid_modification(module, values, modifiable_params)
+ response = modify(module, connection, parameter_group_name, values)
+ elif state == 'absent':
+ if exists:
+ # delete group
+ response, changed = delete(module, connection, parameter_group_name)
+ else:
+ response = {}
+ changed = False
+ elif state == 'reset':
+ response, changed = reset(module, connection, parameter_group_name, values)
+
+ facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
new file mode 100644
index 000000000..fa18b80c0
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_snapshot
+version_added: 1.0.0
+short_description: Manage cache snapshots in Amazon ElastiCache
+description:
+ - Manage cache snapshots in Amazon ElastiCache.
+ - Returns information about the specified snapshot.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+options:
+ name:
+ description:
+ - The name of the snapshot we want to create, copy, or delete.
+ required: true
+ type: str
+ state:
+ description:
+ - Actions that will create, destroy, or copy a snapshot.
+ required: true
+ choices: ['present', 'absent', 'copy']
+ type: str
+ replication_id:
+ description:
+ - The name of the existing replication group from which to make the snapshot.
+ type: str
+ cluster_id:
+ description:
+ - The name of an existing cache cluster in the replication group from which to make the snapshot.
+ type: str
+ target:
+ description:
+ - The name of a snapshot copy.
+ type: str
+ bucket:
+ description:
+ - The S3 bucket to which the snapshot is exported.
+ type: str
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+- name: 'Create a snapshot'
+ community.aws.elasticache_snapshot:
+ name: 'test-snapshot'
+ state: 'present'
+ cluster_id: '{{ cluster }}'
+ replication_id: '{{ replication }}'
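+
+# A sketch of copying a snapshot to S3; the target name and bucket are
+# placeholders, and the bucket must already exist and grant ElastiCache
+# the required export permissions.
+- name: 'Copy a snapshot to S3'
+ community.aws.elasticache_snapshot:
+ name: 'test-snapshot'
+ state: 'copy'
+ target: 'test-snapshot-copy'
+ bucket: 'my-elasticache-backups'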
+"""
+
+RETURN = """
+response_metadata:
+ description: response metadata about the snapshot
+ returned: always
+ type: dict
+ sample:
+ http_headers:
+ content-length: 1490
+ content-type: text/xml
+ date: 'Tue, 07 Feb 2017 16:43:04 GMT'
+ x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
+ http_status_code: 200
+ request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
+ retry_attempts: 0
+snapshot:
+ description: snapshot data
+ returned: always
+ type: dict
+ sample:
+ auto_minor_version_upgrade: true
+ cache_cluster_create_time: '2017-02-01T17:43:58.261000+00:00'
+ cache_cluster_id: test-please-delete
+ cache_node_type: cache.m1.small
+ cache_parameter_group_name: default.redis3.2
+ cache_subnet_group_name: default
+ engine: redis
+ engine_version: 3.2.4
+ node_snapshots:
+ cache_node_create_time: '2017-02-01T17:43:58.261000+00:00'
+ cache_node_id: 0001
+ cache_size:
+ num_cache_nodes: 1
+ port: 11211
+ preferred_availability_zone: us-east-1d
+ preferred_maintenance_window: wed:03:00-wed:04:00
+ snapshot_name: deletesnapshot
+ snapshot_retention_limit: 0
+ snapshot_source: manual
+ snapshot_status: creating
+ snapshot_window: 10:00-11:00
+ vpc_id: vpc-c248fda4
+changed:
+ description: if a snapshot has been created, deleted, or copied
+ returned: always
+ type: bool
+ sample:
+ changed: true
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def create(module, connection, replication_id, cluster_id, name):
+ """ Create an ElastiCache backup. """
+ try:
+ response = connection.create_snapshot(ReplicationGroupId=replication_id,
+ CacheClusterId=cluster_id,
+ SnapshotName=name)
+ changed = True
+ except is_boto3_error_code('SnapshotAlreadyExistsFault'):
+ response = {}
+ changed = False
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to create the snapshot.")
+ return response, changed
+
+
+def copy(module, connection, name, target, bucket):
+ """ Copy an ElastiCache backup. """
+ try:
+ response = connection.copy_snapshot(SourceSnapshotName=name,
+ TargetSnapshotName=target,
+ TargetBucket=bucket)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to copy the snapshot.")
+ return response, changed
+
+
+def delete(module, connection, name):
+ """ Delete an ElastiCache backup. """
+ try:
+ response = connection.delete_snapshot(SnapshotName=name)
+ changed = True
+ except is_boto3_error_code('SnapshotNotFoundFault'):
+ response = {}
+ changed = False
+ except is_boto3_error_code('InvalidSnapshotState'): # pylint: disable=duplicate-except
+ module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
+ "You may need to wait a few minutes.")
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete the snapshot.")
+ return response, changed
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
+ replication_id=dict(type='str'),
+ cluster_id=dict(type='str'),
+ target=dict(type='str'),
+ bucket=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ replication_id = module.params.get('replication_id')
+ cluster_id = module.params.get('cluster_id')
+ target = module.params.get('target')
+ bucket = module.params.get('bucket')
+
+ try:
+ connection = module.client('elasticache')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ changed = False
+ response = {}
+
+ if state == 'present':
+ if not all((replication_id, cluster_id)):
+ module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
+ response, changed = create(module, connection, replication_id, cluster_id, name)
+ elif state == 'absent':
+ response, changed = delete(module, connection, name)
+ elif state == 'copy':
+ if not all((target, bucket)):
+ module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
+ response, changed = copy(module, connection, name, target, bucket)
+
+ facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
+
+ module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
new file mode 100644
index 000000000..0f5f5e75e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elasticache_subnet_group
+version_added: 1.0.0
+short_description: Manage ElastiCache subnet groups
+description:
+ - Creates, modifies, and deletes ElastiCache subnet groups.
+options:
+ state:
+ description:
+ - Specifies whether the subnet should be present or absent.
+ choices: [ 'present' , 'absent' ]
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Cache subnet group identifier.
+ - This value is automatically converted to lowercase.
+ required: true
+ type: str
+ description:
+ description:
+ - ElastiCache subnet group description.
+ - When not provided defaults to I(name) on subnet group creation.
+ type: str
+ subnets:
+ description:
+ - List of subnet IDs that make up the ElastiCache subnet group.
+ - At least one subnet must be provided when creating an ElastiCache subnet group.
+ type: list
+ elements: str
+author:
+ - "Tim Mahoney (@timmahoney)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Add or change a subnet group
+ community.aws.elasticache_subnet_group:
+ state: present
+ name: norwegian-blue
+ description: My Fancy Ex Parrot Subnet Group
+ subnets:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+
+- name: Remove a subnet group
+ community.aws.elasticache_subnet_group:
+ state: absent
+ name: norwegian-blue
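+
+# A minimal sketch: when description is omitted it defaults to the group
+# name on creation.
+- name: Create a subnet group with the default description
+ community.aws.elasticache_subnet_group:
+ state: present
+ name: norwegian-blue
+ subnets:
+ - subnet-aaaaaaaa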
+'''
+
+RETURN = r'''
+cache_subnet_group:
+ description: Description of the Elasticache Subnet Group.
+ returned: always
+ type: dict
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the cache subnet group.
+ returned: when the subnet group exists
+ type: str
+ sample: arn:aws:elasticache:us-east-1:123456789012:subnetgroup:norwegian-blue
+ description:
+ description: The description of the cache subnet group.
+ returned: when the cache subnet group exists
+ type: str
+ sample: My Fancy Ex Parrot Subnet Group
+ name:
+ description: The name of the cache subnet group.
+ returned: when the cache subnet group exists
+ type: str
+ sample: norwegian-blue
+ vpc_id:
+ description: The VPC ID of the cache subnet group.
+ returned: when the cache subnet group exists
+ type: str
+ sample: vpc-abcd1234
+ subnet_ids:
+ description: The IDs of the subnets belonging to the cache subnet group.
+ returned: when the cache subnet group exists
+ type: list
+ elements: str
+ sample:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def get_subnet_group(name):
+ try:
+ groups = client.describe_cache_subnet_groups(
+ aws_retry=True,
+ CacheSubnetGroupName=name,
+ )['CacheSubnetGroups']
+ except is_boto3_error_code('CacheSubnetGroupNotFoundFault'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe subnet group")
+
+ if not groups:
+ return None
+
+ if len(groups) > 1:
+ module.fail_json(
+ msg="Found multiple matches for subnet group",
+ cache_subnet_groups=camel_dict_to_snake_dict(groups),
+ )
+
+ subnet_group = camel_dict_to_snake_dict(groups[0])
+
+ subnet_group['name'] = subnet_group['cache_subnet_group_name']
+ subnet_group['description'] = subnet_group['cache_subnet_group_description']
+
+ subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets'])
+ subnet_group['subnet_ids'] = subnet_ids
+
+ return subnet_group
+
+
+def create_subnet_group(name, description, subnets):
+
+ if not subnets:
+ module.fail_json(msg='At least one subnet must be provided when creating a subnet group')
+
+ if module.check_mode:
+ return True
+
+ try:
+ if not description:
+ description = name
+ client.create_cache_subnet_group(
+ aws_retry=True,
+ CacheSubnetGroupName=name,
+ CacheSubnetGroupDescription=description,
+ SubnetIds=subnets,
+ )
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create subnet group")
+
+
+def update_subnet_group(subnet_group, name, description, subnets):
+ update_params = dict()
+ if description and subnet_group['description'] != description:
+ update_params['CacheSubnetGroupDescription'] = description
+ if subnets:
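+ # Compare as sets so that subnet ordering alone never triggers an update.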
+ old_subnets = set(subnet_group['subnet_ids'])
+ new_subnets = set(subnets)
+ if old_subnets != new_subnets:
+ update_params['SubnetIds'] = list(subnets)
+
+ if not update_params:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.modify_cache_subnet_group(
+ aws_retry=True,
+ CacheSubnetGroupName=name,
+ **update_params,
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update subnet group")
+
+ return True
+
+
+def delete_subnet_group(name):
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.delete_cache_subnet_group(
+ aws_retry=True,
+ CacheSubnetGroupName=name,
+ )
+ return True
+ except is_boto3_error_code('CacheSubnetGroupNotFoundFault'):
+ # AWS is "eventually consistent", cope with the race conditions where
+ # deletion hadn't completed when we ran describe
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete subnet group")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list', elements='str'),
+ )
+
+ global module
+ global client
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name').lower()
+ description = module.params.get('description')
+ subnets = module.params.get('subnets')
+
+ client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff())
+
+ subnet_group = get_subnet_group(name)
+ changed = False
+
+ if state == 'present':
+ if not subnet_group:
+ result = create_subnet_group(name, description, subnets)
+ changed |= result
+ else:
+ result = update_subnet_group(subnet_group, name, description, subnets)
+ changed |= result
+ subnet_group = get_subnet_group(name)
+ else:
+ if subnet_group:
+ result = delete_subnet_group(name)
+ changed |= result
+ subnet_group = None
+
+ module.exit_json(changed=changed, cache_subnet_group=subnet_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py
new file mode 100644
index 000000000..b5b32c178
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticbeanstalk_app
+version_added: 1.0.0
+
+short_description: Create, update, and delete an Elastic Beanstalk application
+
+description:
+  - Creates, updates, or deletes Elastic Beanstalk applications if I(app_name) is provided.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_elasticbeanstalk_app).
+ The usage did not change.
+
+options:
+ app_name:
+ description:
+ - Name of the Beanstalk application you wish to manage.
+ aliases: [ 'name' ]
+ type: str
+ description:
+ description:
+ - The description of the application.
+ type: str
+ state:
+ description:
+ - Whether to ensure the application is present or absent.
+ default: present
+ choices: ['absent','present']
+ type: str
+ terminate_by_force:
+ description:
+ - When I(terminate_by_force=true), running environments will be terminated before deleting the application.
+ default: false
+ type: bool
+author:
+ - Harpreet Singh (@hsingh)
+ - Stephen Granger (@viper233)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Create or update an application
+- community.aws.elasticbeanstalk_app:
+ app_name: Sample_App
+ description: "Hello World App"
+ state: present
+
+# Delete application
+- community.aws.elasticbeanstalk_app:
+ app_name: Sample_App
+ state: absent
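+
+# Delete the application, terminating any running environments first
+- community.aws.elasticbeanstalk_app:
+    app_name: Sample_App
+    terminate_by_force: true
+    state: absent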
+
+'''
+
+RETURN = '''
+app:
+ description: Beanstalk application.
+ returned: always
+ type: dict
+ sample: {
+ "ApplicationName": "app-name",
+ "ConfigurationTemplates": [],
+ "DateCreated": "2016-12-28T14:50:03.185000+00:00",
+ "DateUpdated": "2016-12-28T14:50:03.185000+00:00",
+ "Description": "description",
+ "Versions": [
+ "1.0.0",
+ "1.0.1"
+ ]
+ }
+output:
+ description: Message indicating what change will occur.
+ returned: in check mode
+ type: str
+ sample: App is up-to-date
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+
+
+def describe_app(ebs, app_name, module):
+ apps = list_apps(ebs, app_name, module)
+
+ return None if len(apps) != 1 else apps[0]
+
+
+def list_apps(ebs, app_name, module):
+ try:
+ if app_name is not None:
+ apps = ebs.describe_applications(ApplicationNames=[app_name])
+ else:
+ apps = ebs.describe_applications()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not describe application")
+
+ return apps.get("Applications", [])
+
+
+def check_app(ebs, app, module):
+ app_name = module.params['app_name']
+ description = module.params['description']
+ state = module.params['state']
+ terminate_by_force = module.params['terminate_by_force']
+
+ result = {}
+
+    if state == 'present' and app is None:
+        result = dict(changed=True, output="App would be created")
+    elif state == 'present' and app.get("Description", None) != description:
+        result = dict(changed=True, output="App would be updated", app=app)
+    elif state == 'present' and app.get("Description", None) == description:
+        result = dict(changed=False, output="App is up-to-date", app=app)
+    elif state == 'absent' and app is None:
+        result = dict(changed=False, output="App does not exist", app={})
+    elif state == 'absent' and app is not None and terminate_by_force is True:
+        # This branch must precede the generic 'absent' branch or it is never reached
+        result = dict(changed=True, output="Running environments would be terminated before the App is deleted", app=app)
+    elif state == 'absent' and app is not None:
+        result = dict(changed=True, output="App would be deleted", app=app)
+
+ module.exit_json(**result)
+
+
+def filter_empty(**kwargs):
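+    """Return only the keyword arguments whose values are truthy, so optional API parameters are omitted."""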
+ retval = {}
+ for k, v in kwargs.items():
+ if v:
+ retval[k] = v
+ return retval
+
+
+def main():
+ argument_spec = dict(
+ app_name=dict(aliases=['name'], type='str', required=False),
+ description=dict(),
+ state=dict(choices=['present', 'absent'], default='present'),
+ terminate_by_force=dict(type='bool', default=False, required=False)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ app_name = module.params['app_name']
+ description = module.params['description']
+ state = module.params['state']
+ terminate_by_force = module.params['terminate_by_force']
+
+ if app_name is None:
+ module.fail_json(msg='Module parameter "app_name" is required')
+
+ result = {}
+
+ ebs = module.client('elasticbeanstalk')
+
+ app = describe_app(ebs, app_name, module)
+
+ if module.check_mode:
+ check_app(ebs, app, module)
+ module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
+
+ if state == 'present':
+ if app is None:
+ try:
+                ebs.create_application(**filter_empty(ApplicationName=app_name,
+                                                      Description=description))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not create application")
+
+ app = describe_app(ebs, app_name, module)
+
+ result = dict(changed=True, app=app)
+ else:
+ if app.get("Description", None) != description:
+ try:
+ if not description:
+ ebs.update_application(ApplicationName=app_name)
+ else:
+ ebs.update_application(ApplicationName=app_name, Description=description)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Could not update application")
+
+ app = describe_app(ebs, app_name, module)
+
+ result = dict(changed=True, app=app)
+ else:
+ result = dict(changed=False, app=app)
+
+ else:
+ if app is None:
+ result = dict(changed=False, output='Application not found', app={})
+ else:
+ try:
+ if terminate_by_force:
+ # Running environments will be terminated before deleting the application
+ ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force)
+ else:
+ ebs.delete_application(ApplicationName=app_name)
+ changed = True
+ except is_boto3_error_message('It is currently pending deletion'):
+ changed = False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Cannot terminate app")
+
+ result = dict(changed=changed, app=app)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
new file mode 100644
index 000000000..4cbeb9589
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_classic_lb_info
+version_added: 1.0.0
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+  - Gather information about EC2 Elastic Load Balancers in AWS.
+author:
+ - "Michael Schultz (@mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+      - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs; otherwise, information about all ELBs is returned.
+ type: list
+ elements: str
+ default: []
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
+
+# Gather information about all ELBs
+- community.aws.elb_classic_lb_info:
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+# Gather information about a particular ELB
+- community.aws.elb_classic_lb_info:
+ names: frontend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ elb_info.elbs.0.dns_name }}"
+
+# Gather information about a set of ELBs
+- community.aws.elb_classic_lb_info:
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_info
+
+- ansible.builtin.debug:
+ msg: "{{ item.dns_name }}"
+ loop: "{{ elb_info.elbs }}"
+
+'''
+
+RETURN = r'''
+elbs:
+  description: A list of load balancers.
+ returned: always
+ type: list
+ sample:
+ elbs:
+ - attributes:
+ access_log:
+ enabled: false
+ connection_draining:
+ enabled: true
+ timeout: 300
+ connection_settings:
+ idle_timeout: 60
+ cross_zone_load_balancing:
+ enabled: true
+ availability_zones:
+ - "us-east-1a"
+ - "us-east-1b"
+ - "us-east-1c"
+ - "us-east-1d"
+ - "us-east-1e"
+ backend_server_description: []
+ canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ canonical_hosted_zone_name_id: XXXXXXXXXXXXXX
+ created_time: '2017-08-23T18:25:03.280000+00:00'
+ dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
+ health_check:
+ healthy_threshold: 10
+ interval: 30
+ target: HTTP:80/index.html
+ timeout: 5
+ unhealthy_threshold: 2
+ instances: []
+ instances_inservice: []
+ instances_inservice_count: 0
+ instances_outofservice: []
+ instances_outofservice_count: 0
+ instances_unknownservice: []
+ instances_unknownservice_count: 0
+ listener_descriptions:
+ - listener:
+ instance_port: 80
+ instance_protocol: HTTP
+ load_balancer_port: 80
+ protocol: HTTP
+ policy_names: []
+ load_balancer_name: test-lb
+ policies:
+ app_cookie_stickiness_policies: []
+ lb_cookie_stickiness_policies: []
+ other_policies: []
+ scheme: internet-facing
+ security_groups:
+ - sg-29d13055
+ source_security_group:
+ group_name: default
+ owner_alias: XXXXXXXXXXXX
+ subnets:
+ - subnet-XXXXXXXX
+ - subnet-XXXXXXXX
+ tags: {}
+ vpc_id: vpc-c248fda4
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+MAX_AWS_RETRIES = 5
+MAX_AWS_DELAY = 5
+
+
+def list_elbs(connection, load_balancer_names):
+ results = []
+
+ if not load_balancer_names:
+ for lb in get_all_lb(connection):
+ results.append(describe_elb(connection, lb))
+
+ for load_balancer_name in load_balancer_names:
+ lb = get_lb(connection, load_balancer_name)
+ if not lb:
+ continue
+ results.append(describe_elb(connection, lb))
+ return results
+
+
+def describe_elb(connection, lb):
+ description = camel_dict_to_snake_dict(lb)
+ name = lb['LoadBalancerName']
+ instances = lb.get('Instances', [])
+ description['tags'] = get_tags(connection, name)
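+    # Break registered instances out by health state and record a count for each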
+ description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService')
+ description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService')
+ description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown')
+ description['attributes'] = get_lb_attributes(connection, name)
+ return description
+
+
+@AWSRetry.jittered_backoff()
+def get_all_lb(connection):
+ paginator = connection.get_paginator('describe_load_balancers')
+ return paginator.paginate().build_full_result()['LoadBalancerDescriptions']
+
+
+def get_lb(connection, load_balancer_name):
+ try:
+ return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0]
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return []
+
+
+def get_lb_attributes(connection, load_balancer_name):
+ attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {})
+ return camel_dict_to_snake_dict(attributes)
+
+
+def get_tags(connection, load_balancer_name):
+ tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions']
+ if not tags:
+ return {}
+ return boto3_tag_list_to_ansible_dict(tags[0]['Tags'])
+
+
+def lb_instance_health(connection, load_balancer_name, instances, state):
+    instance_states = connection.describe_instance_health(aws_retry=True, LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', [])
+ instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state]
+ return instate, len(instate)
+
+
+def main():
+ argument_spec = dict(
+ names=dict(default=[], type='list', elements='str')
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY))
+
+ try:
+ elbs = list_elbs(connection, module.params.get('names'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get load balancer information.")
+
+ module.exit_json(elbs=elbs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_instance.py b/ansible_collections/community/aws/plugins/modules/elb_instance.py
new file mode 100644
index 000000000..ecea32a63
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_instance.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_instance
+version_added: 1.0.0
+short_description: De-registers or registers instances from EC2 ELBs
+description:
+ - This module de-registers or registers an AWS EC2 instance from the ELBs
+ that it belongs to.
+  - The task is marked as changed only when there are ELBs found to operate on.
+author: "John Jarvis (@jarv)"
+options:
+ state:
+ description:
+ - Register or deregister the instance.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ instance_id:
+ description:
+ - EC2 Instance ID.
+ required: true
+ type: str
+ ec2_elbs:
+ description:
+      - List of ELB names.
+ - Required when I(state=present).
+ type: list
+ elements: str
+ enable_availability_zone:
+ description:
+ - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
+ been enabled.
+ - If I(enable_availability_zone=no), the task will fail if the availability zone is not enabled on the ELB.
+ type: bool
+ default: true
+ wait:
+ description:
+ - Wait for instance registration or deregistration to complete successfully before returning.
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - Number of seconds to wait for an instance to change state.
+ - If I(wait_timeout=0) then this module may return an error if a transient error occurs.
+ - If non-zero then any transient errors are ignored until the timeout is reached.
+ - Ignored when I(wait=no).
+ default: 0
+ type: int
+notes:
+- The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release
+ 4.0.0 is no longer set.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r"""
+# basic pre_task and post_task example
+pre_tasks:
+ - name: Instance De-register
+ community.aws.elb_instance:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ state: absent
+ register: deregister_instances
+ delegate_to: localhost
+roles:
+ - myrole
+post_tasks:
+ - name: Instance Register
+ community.aws.elb_instance:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ ec2_elbs: "{{ deregister_instances.updated_elbs }}"
+ state: present
+ delegate_to: localhost
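+
+# Registration can also wait for the instance to come InService on named ELBs
+# (the ELB name below is illustrative)
+  - name: Instance Register with wait
+    community.aws.elb_instance:
+      instance_id: "{{ ansible_ec2_instance_id }}"
+      ec2_elbs:
+        - frontend-prod-elb
+      state: present
+      wait: true
+      wait_timeout: 300
+    delegate_to: localhost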
+"""
+
+RETURN = '''
+updated_elbs:
+ description: A list of ELB names that the instance has been added to or removed from.
+ returned: always
+ type: list
+ elements: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class ElbManager:
+ """Handles EC2 instance ELB registration and de-registration"""
+
+ def __init__(self, module, instance_id=None, ec2_elbs=None):
+ retry_decorator = AWSRetry.jittered_backoff()
+ self.module = module
+ self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator)
+ self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator)
+ self.client_elb = module.client('elb', retry_decorator=retry_decorator)
+ self.instance_id = instance_id
+ self.lbs = self._get_instance_lbs(ec2_elbs)
+ self.changed = False
+ self.updated_elbs = set()
+
+ def deregister(self, wait, timeout):
+ """De-register the instance from all ELBs and wait for the ELB
+ to report it out-of-service"""
+
+ for lb in self.lbs:
+ instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ if self.instance_id not in instance_ids:
+ continue
+
+ self.updated_elbs.add(lb['LoadBalancerName'])
+
+ if self.module.check_mode:
+ self.changed = True
+ continue
+
+ try:
+ self.client_elb.deregister_instances_from_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=lb['LoadBalancerName'],
+ Instances=[{"InstanceId": self.instance_id}],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer',
+ load_balancer=lb, instance=self.instance_id)
+
+ # The ELB is changing state in some way. Either an instance that's
+ # InService is moving to OutOfService, or an instance that's
+ # already OutOfService is being deregistered.
+ self.changed = True
+
+ if wait:
+ for lb in self.lbs:
+ self._await_elb_instance_state(lb, 'Deregistered', timeout)
+
+ def register(self, wait, enable_availability_zone, timeout):
+ """Register the instance for all ELBs and wait for the ELB
+ to report the instance in-service"""
+ for lb in self.lbs:
+ instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ if self.instance_id in instance_ids:
+ continue
+
+ self.updated_elbs.add(lb['LoadBalancerName'])
+
+ if enable_availability_zone:
+                self.changed |= self._enable_availability_zone(lb)
+
+ if self.module.check_mode:
+ self.changed = True
+ continue
+
+ try:
+ self.client_elb.register_instances_with_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=lb['LoadBalancerName'],
+ Instances=[{"InstanceId": self.instance_id}],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, 'Failed to register instance with load balancer',
+ load_balancer=lb, instance=self.instance_id)
+
+ self.changed = True
+
+ if wait:
+ for lb in self.lbs:
+ self._await_elb_instance_state(lb, 'InService', timeout)
+
+ @AWSRetry.jittered_backoff()
+ def _describe_elbs(self, **params):
+ paginator = self.client_elb.get_paginator('describe_load_balancers')
+ results = paginator.paginate(**params).build_full_result()
+ return results['LoadBalancerDescriptions']
+
+ def exists(self, lbtest):
+ """ Verify that the named ELB actually exists """
+
+ found = False
+ for lb in self.lbs:
+ if lb['LoadBalancerName'] == lbtest:
+ found = True
+ break
+ return found
+
+    def _enable_availability_zone(self, lb):
+ """Enable the current instance's availability zone in the provided lb.
+ Returns True if the zone was enabled or False if no change was made.
+ lb: load balancer"""
+ instance = self._get_instance()
+ desired_zone = instance['Placement']['AvailabilityZone']
+
+ if desired_zone in lb['AvailabilityZones']:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ try:
+ self.client_elb.enable_availability_zones_for_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=lb['LoadBalancerName'],
+ AvailabilityZones=[desired_zone],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers',
+ load_balancer=lb, zone=desired_zone)
+
+ return True
+
+ def _await_elb_instance_state(self, lb, awaited_state, timeout):
+ """Wait for an ELB to change state"""
+ if self.module.check_mode:
+ return
+
+ initial_state = self._get_instance_health(lb)
+
+ if awaited_state == initial_state:
+ return
+
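+        # botocore provides no dedicated OutOfService waiter, so the deregistered waiter is reused below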
+ if awaited_state == 'InService':
+ waiter = self.client_elb.get_waiter('instance_in_service')
+ elif awaited_state == 'Deregistered':
+ waiter = self.client_elb.get_waiter('instance_deregistered')
+ elif awaited_state == 'OutOfService':
+ waiter = self.client_elb.get_waiter('instance_deregistered')
+ else:
+ self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state)
+
+ try:
+ waiter.wait(
+ LoadBalancerName=lb['LoadBalancerName'],
+ Instances=[{"InstanceId": self.instance_id}],
+ WaiterConfig={'Delay': 1, 'MaxAttempts': timeout},
+ )
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state',
+ awaited_state=awaited_state)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state',
+ awaited_state=awaited_state)
+
+ return
+
+ def _get_instance_health(self, lb):
+ """
+ Check instance health, should return status object or None under
+ certain error conditions.
+ """
+ try:
+ status = self.client_elb.describe_instance_health(
+ aws_retry=True,
+ LoadBalancerName=lb['LoadBalancerName'],
+ Instances=[{'InstanceId': self.instance_id}],
+ )['InstanceStates']
+ except is_boto3_error_code('InvalidInstance'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg='Failed to get instance health')
+
+ if not status:
+ return None
+
+ return status[0]['State']
+
+ def _get_instance_lbs(self, ec2_elbs=None):
+ """Returns a list of ELBs attached to self.instance_id
+ ec2_elbs: an optional list of elb names that will be used
+ for elb lookup instead of returning what elbs
+ are attached to self.instance_id"""
+
+ list_params = dict()
+ if not ec2_elbs:
+ ec2_elbs = self._get_auto_scaling_group_lbs()
+
+ if ec2_elbs:
+ list_params['LoadBalancerNames'] = ec2_elbs
+
+ try:
+ elbs = self._describe_elbs(**list_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, 'Failed to describe load balancers')
+
+ if ec2_elbs:
+ return elbs
+
+ # If ec2_elbs wasn't specified, then filter out LBs we're not a member
+ # of.
+ lbs = []
+ for lb in elbs:
+ instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ if self.instance_id in instance_ids:
+ lbs.append(lb)
+
+ return lbs
+
+ def _get_auto_scaling_group_lbs(self):
+ """Returns a list of ELBs associated with self.instance_id
+ indirectly through its auto scaling group membership"""
+
+ try:
+ asg_instances = self.client_asg.describe_auto_scaling_instances(
+ aws_retry=True,
+ InstanceIds=[self.instance_id])['AutoScalingInstances']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
+
+ if len(asg_instances) > 1:
+ self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+ if not asg_instances:
+ # Instance isn't a member of an ASG
+ return []
+
+ asg_name = asg_instances[0]['AutoScalingGroupName']
+
+ try:
+            asgs = self.client_asg.describe_auto_scaling_groups(
+                aws_retry=True,
+                AutoScalingGroupNames=[asg_name])['AutoScalingGroups']
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Failed to describe ASG')
+
+        if len(asgs) != 1:
+            self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+        return asgs[0]['LoadBalancerNames']
+
+ def _get_instance(self):
+ """Returns the description of an instance"""
+ try:
+ result = self.client_ec2.describe_instances(
+ aws_retry=True,
+ InstanceIds=[self.instance_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Failed to describe instance')
+ return result['Reservations'][0]['Instances'][0]
+
+
+def main():
+ argument_spec = dict(
+ state={'required': True, 'choices': ['present', 'absent']},
+ instance_id={'required': True},
+ ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+ wait={'required': False, 'default': True, 'type': 'bool'},
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'},
+ )
+ required_if = [
+ ('state', 'present', ['ec2_elbs']),
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ ec2_elbs = module.params['ec2_elbs']
+ wait = module.params['wait']
+ enable_availability_zone = module.params['enable_availability_zone']
+ timeout = module.params['wait_timeout']
+ instance_id = module.params['instance_id']
+
+ elb_man = ElbManager(module, instance_id, ec2_elbs)
+
+ if ec2_elbs is not None:
+ for elb in ec2_elbs:
+ if not elb_man.exists(elb):
+ module.fail_json(msg="ELB {0} does not exist".format(elb))
+
+ if module.params['state'] == 'present':
+ elb_man.register(wait, enable_availability_zone, timeout)
+ elif module.params['state'] == 'absent':
+ elb_man.deregister(wait, timeout)
+
+ module.exit_json(
+ changed=elb_man.changed,
+ updated_elbs=list(elb_man.updated_elbs),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_network_lb.py b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
new file mode 100644
index 000000000..6dcdfd209
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_network_lb
+version_added: 1.0.0
+short_description: Manage a Network Load Balancer
+description:
+ - Manage an AWS Network Elastic Load Balancer. See
+ U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details.
+author:
+ - "Rob White (@wimnat)"
+options:
+ cross_zone_load_balancing:
+ description:
+ - Indicates whether cross-zone load balancing is enabled.
+ - Defaults to C(false).
+ type: bool
+ deletion_protection:
+ description:
+ - Indicates whether deletion protection for the ELB is enabled.
+ - Defaults to C(false).
+ type: bool
+ listeners:
+ description:
+ - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys
+ are CamelCased.
+ type: list
+ elements: dict
+ suboptions:
+ Port:
+ description: The port on which the load balancer is listening.
+ type: int
+ required: true
+ Protocol:
+ description: The protocol for connections from clients to the load balancer.
+ type: str
+ required: true
+ Certificates:
+ description: The SSL server certificate.
+ type: list
+ elements: dict
+ suboptions:
+ CertificateArn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ type: str
+ SslPolicy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ type: str
+ DefaultActions:
+ description: The default actions for the listener.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ Type:
+ description: The type of action.
+ type: str
+ TargetGroupArn:
+ description:
+ - The Amazon Resource Name (ARN) of the target group.
+ - Mutually exclusive with I(TargetGroupName).
+ type: str
+ TargetGroupName:
+ description:
+ - The name of the target group.
+ - Mutually exclusive with I(TargetGroupArn).
+ name:
+ description:
+ - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
+ characters or hyphens, and must not begin or end with a hyphen.
+ required: true
+ type: str
+ purge_listeners:
+ description:
+ - If I(purge_listeners=true), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter.
+ - If the I(listeners) parameter is not set then listeners will not be modified.
+ default: true
+ type: bool
+ subnet_mappings:
+ description:
+ - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP
+ to attach to the load balancer or the internal IP address for an internal load balancer. You can specify one Elastic IP address or internal
+ address per subnet.
+ - This parameter is mutually exclusive with I(subnets).
+ type: list
+ elements: dict
+ subnets:
+ description:
+ - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
+ at least two Availability Zones.
+ - Required when I(state=present).
+ - This parameter is mutually exclusive with I(subnet_mappings).
+ type: list
+ elements: str
+ scheme:
+ description:
+      - Internet-facing or internal load balancer. An ELB scheme cannot be modified after creation.
+ default: internet-facing
+ choices: [ 'internet-facing', 'internal' ]
+ type: str
+ state:
+ description:
+ - Create or destroy the load balancer.
+ - The default changed from C('absent') to C('present') in release 4.0.0.
+ choices: [ 'present', 'absent' ]
+ type: str
+ default: 'present'
+ wait:
+ description:
+ - Whether or not to wait for the network load balancer to reach the desired state.
+ type: bool
+ wait_timeout:
+ description:
+ - The duration in seconds to wait, used in conjunction with I(wait).
+ type: int
+ ip_address_type:
+ description:
+      - Sets the type of IP addresses used by the subnets of the specified Network Load Balancer.
+ choices: [ 'ipv4', 'dualstack' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+notes:
+ - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
+ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create an ELB and attach a listener
+ community.aws.elb_network_lb:
+ name: myelb
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ DefaultActions:
+ - Type: forward # Required. Only 'forward' is accepted at this time
+ TargetGroupName: mytargetgroup # Required. The name of the target group
+ state: present
+
+- name: Create an ELB with an attached Elastic IP address
+ community.aws.elb_network_lb:
+ name: myelb
+ subnet_mappings:
+ - SubnetId: subnet-012345678
+ AllocationId: eipalloc-aabbccdd
+ listeners:
+ - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ DefaultActions:
+ - Type: forward # Required. Only 'forward' is accepted at this time
+ TargetGroupName: mytargetgroup # Required. The name of the target group
+ state: present
+
+- name: Create an internal ELB with a specified IP address
+ community.aws.elb_network_lb:
+ name: myelb
+ scheme: internal
+ subnet_mappings:
+ - SubnetId: subnet-012345678
+ PrivateIPv4Address: 192.168.0.1 # Must be an address from within the CIDR of the subnet.
+ listeners:
+ - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ DefaultActions:
+ - Type: forward # Required. Only 'forward' is accepted at this time
+ TargetGroupName: mytargetgroup # Required. The name of the target group
+ state: present
+
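+# TLS listeners additionally take a certificate ARN and an optional security
+# policy; the ARN and policy name below are placeholders
+- name: Create an ELB with a TLS listener
+  community.aws.elb_network_lb:
+    name: myelb
+    subnets:
+      - subnet-012345678
+      - subnet-abcdef000
+    listeners:
+      - Protocol: TLS
+        Port: 443
+        Certificates:
+          - CertificateArn: arn:aws:acm:us-east-1:123456789012:certificate/01234567-0123-0123-0123-012345678901
+        SslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01
+        DefaultActions:
+          - Type: forward
+            TargetGroupName: mytargetgroup
+    state: present
+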
+- name: Remove an ELB
+ community.aws.elb_network_lb:
+ name: myelb
+ state: absent
+
+'''
+
+RETURN = r'''
+load_balancer:
+  description: A representation of the Network Load Balancer.
+ returned: when state is present
+ type: dict
+ version_added: 5.0.0
+ contains:
+ availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when state is present
+ type: list
+ sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]"
+ canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when state is present
+ type: str
+ sample: ABCDEF12345678
+ created_time:
+ description: The date and time the load balancer was created.
+ returned: when state is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+ deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+ dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+ idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when state is present
+ type: str
+ sample: 60
+ ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when state is present
+ type: str
+ sample: ipv4
+ listeners:
+ description: Information about the listeners.
+ returned: when state is present
+ type: complex
+ contains:
+ listener_arn:
+ description: The Amazon Resource Name (ARN) of the listener.
+ returned: when state is present
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: ""
+ port:
+ description: The port on which the load balancer is listening.
+ returned: when state is present
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol for connections from clients to the load balancer.
+ returned: when state is present
+ type: str
+ sample: HTTPS
+ certificates:
+ description: The SSL server certificate.
+ returned: when state is present
+ type: complex
+ contains:
+ certificate_arn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ returned: when state is present
+ type: str
+ sample: ""
+ ssl_policy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ returned: when state is present
+ type: str
+ sample: ""
+ default_actions:
+ description: The default actions for the listener.
+ returned: when state is present
+          type: complex
+ contains:
+ type:
+ description: The type of action.
+ returned: when state is present
+ type: str
+ sample: ""
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state is present
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/001122334455
+ load_balancer_name:
+ description: The name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: my-elb
+ load_balancing_cross_zone_enabled:
+ description: Indicates whether cross-zone load balancing is enabled.
+ returned: when state is present
+ type: str
+ sample: true
+ scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when state is present
+ type: str
+ sample: internal
+ state:
+ description: The state of the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{'code': 'active'}"
+ tags:
+ description: The tags attached to the load balancer.
+ returned: when state is present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ type:
+ description: The type of load balancer.
+ returned: when state is present
+ type: str
+ sample: network
+ vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when state is present
+ type: str
+ sample: vpc-0011223344
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener
+
+
+def create_or_update_elb(elb_obj):
+    """Create the ELB or modify its main attributes, then exit via module.exit_json()."""
+ if elb_obj.elb:
+ # ELB exists so check subnets, security groups and tags match what has been passed
+
+ # Subnets
+ if not elb_obj.compare_subnets():
+ elb_obj.modify_subnets()
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if elb_obj.tags is not None:
+
+ # Delete necessary tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
+ boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
+ if tags_to_delete:
+ elb_obj.delete_tags(tags_to_delete)
+
+ # Add/update tags
+ if tags_need_modify:
+ elb_obj.modify_tags()
+
+ else:
+ # Create load balancer
+ elb_obj.create_elb()
+
+ # ELB attributes
+ elb_obj.update_elb_attributes()
+ elb_obj.modify_elb_attributes()
+
+ # Listeners
+ listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
+
+ listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
+
+ # Delete listeners
+ for listener_to_delete in listeners_to_delete:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.delete()
+ listeners_obj.changed = True
+
+ # Add listeners
+ for listener_to_add in listeners_to_add:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.add()
+ listeners_obj.changed = True
+
+ # Modify listeners
+ for listener_to_modify in listeners_to_modify:
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
+ listener_obj.modify()
+ listeners_obj.changed = True
+
+ # If listeners changed, mark ELB as changed
+ if listeners_obj.changed:
+ elb_obj.changed = True
+
+ # Update ELB ip address type only if option has been provided
+ if elb_obj.module.params.get('ip_address_type') is not None:
+ elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type'))
+
+ # Update the objects to pickup changes
+ # Get the ELB again
+ elb_obj.update()
+ # Get the ELB listeners again
+ listeners_obj.update()
+ # Update the ELB attributes
+ elb_obj.update_elb_attributes()
+
+ # Convert to snake_case and merge in everything we want to return to the user
+ snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
+ snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
+ snaked_elb['listeners'] = []
+ for listener in listeners_obj.current_listeners:
+ snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
+
+ # Change tags to ansible friendly dict
+ snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
+
+ # ip address type
+ snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type()
+
+ elb_obj.module.exit_json(
+ changed=elb_obj.changed,
+ load_balancer=snaked_elb,
+ **snaked_elb)
+
+
+def delete_elb(elb_obj):
+
+ if elb_obj.elb:
+ elb_obj.delete()
+
+ elb_obj.module.exit_json(changed=elb_obj.changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ cross_zone_load_balancing=dict(type='bool'),
+ deletion_protection=dict(type='bool'),
+ listeners=dict(type='list',
+ elements='dict',
+ options=dict(
+ Protocol=dict(type='str', required=True),
+ Port=dict(type='int', required=True),
+ SslPolicy=dict(type='str'),
+ Certificates=dict(type='list', elements='dict'),
+ DefaultActions=dict(type='list', required=True, elements='dict')
+ )
+ ),
+ name=dict(required=True, type='str'),
+ purge_listeners=dict(default=True, type='bool'),
+ purge_tags=dict(default=True, type='bool'),
+ subnets=dict(type='list', elements='str'),
+ subnet_mappings=dict(type='list', elements='dict'),
+ scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
+ state=dict(choices=['present', 'absent'], type='str', default='present'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ wait_timeout=dict(type='int'),
+ wait=dict(type='bool'),
+ ip_address_type=dict(type='str', choices=['ipv4', 'dualstack'])
+ )
+ )
+
+ required_if = [
+ ('state', 'present', ('subnets', 'subnet_mappings',), True)
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=required_if,
+ mutually_exclusive=[['subnets', 'subnet_mappings']])
+
+ # Check for subnets or subnet_mappings if state is present
+ state = module.params.get("state")
+
+ # Quick check of listeners parameters
+ listeners = module.params.get("listeners")
+    if listeners is not None:
+        protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
+        for listener in listeners:
+            if listener.get('Protocol') not in protocols_list:
+                module.fail_json(msg="'Protocol' must be one of: " + ", ".join(protocols_list))
+
+ connection = module.client('elbv2')
+ connection_ec2 = module.client('ec2')
+
+ elb = NetworkLoadBalancer(connection, connection_ec2, module)
+
+ if state == 'present':
+ create_or_update_elb(elb)
+ else:
+ delete_elb(elb)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target.py b/ansible_collections/community/aws/plugins/modules/elb_target.py
new file mode 100644
index 000000000..cff46a62a
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_target.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_target
+version_added: 1.0.0
+short_description: Manage a target in a target group
+description:
+ - Used to register or deregister a target in a target group.
+author: "Rob White (@wimnat)"
+options:
+ deregister_unused:
+ description:
+ - The default behaviour for targets that are unused is to leave them registered.
+ - If instead you would like to remove them set I(deregister_unused=true).
+ default: false
+ type: bool
+ target_az:
+ description:
+ - An Availability Zone or C(all). This determines whether the target receives traffic from the load balancer nodes in the specified
+ Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target
+ type of the target group is instance.
+ type: str
+ target_group_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the target group.
+ - Mutually exclusive of I(target_group_name).
+ type: str
+ target_group_name:
+ description:
+ - The name of the target group.
+ - Mutually exclusive of I(target_group_arn).
+ type: str
+ target_id:
+ description:
+ - The ID of the target.
+ required: true
+ type: str
+ target_port:
+ description:
+ - The port on which the target is listening. You can specify a port override. If a target is already registered,
+ you can register it again using a different port.
+ - The default port for a target is the port for the target group.
+ required: false
+ type: int
+ target_status:
+ description:
+ - Blocks and waits for the target status to equal given value. For more detail on target status see
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states)
+ required: false
+ choices: [ 'initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable' ]
+ type: str
+ target_status_timeout:
+ description:
+ - Maximum time in seconds to wait for I(target_status) change.
+ required: false
+ default: 60
+ type: int
+ state:
+ description:
+ - Register or deregister the target.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+notes:
+ - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Register an IP address target to a target group
+ community.aws.elb_target:
+ target_group_name: myiptargetgroup
+    target_id: 10.0.0.10
+ state: present
+
+- name: Register an instance target to a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ state: present
+
+- name: Deregister a target from a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ state: absent
+
+# Modify a target to use a different port
+- name: Register a target to a target group
+ community.aws.elb_target:
+ target_group_name: mytargetgroup
+ target_id: i-1234567
+ target_port: 8080
+ state: present
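+
+# Block until the registered target reports healthy (see target_status/target_status_timeout)
+- name: Register a target and wait for it to become healthy
+  community.aws.elb_target:
+    target_group_name: mytargetgroup
+    target_id: i-1234567
+    target_status: healthy
+    target_status_timeout: 300
+    state: present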
+
+'''
+
+RETURN = '''
+
+'''
+
+from time import time, sleep
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+def describe_target_groups_with_backoff(connection, tg_name):
+ return connection.describe_target_groups(Names=[tg_name])
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+
+ try:
+ response = describe_target_groups_with_backoff(connection, tg_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name))
+
+ tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+ return tg_arn
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+def describe_targets_with_backoff(connection, tg_arn, target):
+ if target is None:
+ tg = []
+ else:
+ tg = [target]
+
+ return connection.describe_target_health(TargetGroupArn=tg_arn, Targets=tg)
+
+
+def describe_targets(connection, module, tg_arn, target=None):
+
+ """
+ Describe targets in a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :param tg_arn: target group arn
+ :param target: dictionary containing target id and port
+ :return:
+ """
+
+ try:
+ targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions']
+ if not targets:
+ return {}
+ return targets[0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target))
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def register_target_with_backoff(connection, target_group_arn, target):
+ connection.register_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def register_target(connection, module):
+
+ """
+ Registers a target to a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :return:
+ """
+
+ target_az = module.params.get("target_az")
+ target_group_arn = module.params.get("target_group_arn")
+ target_id = module.params.get("target_id")
+ target_port = module.params.get("target_port")
+ target_status = module.params.get("target_status")
+ target_status_timeout = module.params.get("target_status_timeout")
+ changed = False
+
+ if not target_group_arn:
+ target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
+
+ target = dict(Id=target_id)
+ if target_az:
+ target['AvailabilityZone'] = target_az
+ if target_port:
+ target['Port'] = target_port
+
+ target_description = describe_targets(connection, module, target_group_arn, target)
+
+ if 'Reason' in target_description['TargetHealth']:
+ if target_description['TargetHealth']['Reason'] == "Target.NotRegistered":
+ try:
+ register_target_with_backoff(connection, target_group_arn, target)
+ changed = True
+ if target_status:
+ target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Unable to register target {0}".format(target))
+
+ # Get all targets for the target group
+ target_descriptions = describe_targets(connection, module, target_group_arn)
+
+ module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def deregister_target_with_backoff(connection, target_group_arn, target):
+ connection.deregister_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def deregister_target(connection, module):
+
+ """
+    Deregisters a target from a target group
+
+ :param module: ansible module object
+ :param connection: boto3 connection
+ :return:
+ """
+
+ deregister_unused = module.params.get("deregister_unused")
+ target_group_arn = module.params.get("target_group_arn")
+ target_id = module.params.get("target_id")
+ target_port = module.params.get("target_port")
+ target_status = module.params.get("target_status")
+ target_status_timeout = module.params.get("target_status_timeout")
+ changed = False
+
+ if not target_group_arn:
+ target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
+
+ target = dict(Id=target_id)
+ if target_port:
+ target['Port'] = target_port
+
+ target_description = describe_targets(connection, module, target_group_arn, target)
+ current_target_state = target_description['TargetHealth']['State']
+ current_target_reason = target_description['TargetHealth'].get('Reason')
+
+ needs_deregister = False
+
+ if deregister_unused and current_target_state == 'unused':
+ if current_target_reason != 'Target.NotRegistered':
+ needs_deregister = True
+ elif current_target_state not in ['unused', 'draining']:
+ needs_deregister = True
+
+ if needs_deregister:
+ try:
+ deregister_target_with_backoff(connection, target_group_arn, target)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target))
+ else:
+ if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining':
+ module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " +
+ "To force deregistration use the 'deregister_unused' option.")
+
+ if target_status:
+ target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+
+ # Get all targets for the target group
+ target_descriptions = describe_targets(connection, module, target_group_arn)
+
+ module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+
+
+def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout):
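+    # Poll the target's health state once per second until it matches target_status or the timeout expires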
+ reached_state = False
+ timeout = target_status_timeout + time()
+ while time() < timeout:
+ health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State']
+ if health_state == target_status:
+ reached_state = True
+ break
+ sleep(1)
+ if not reached_state:
+        module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}'.format(target_status_timeout, health_state))
+
+
+def main():
+
+ argument_spec = dict(
+ deregister_unused=dict(type='bool', default=False),
+ target_az=dict(type='str'),
+ target_group_arn=dict(type='str'),
+ target_group_name=dict(type='str'),
+ target_id=dict(type='str', required=True),
+ target_port=dict(type='int'),
+ target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'),
+ target_status_timeout=dict(type='int', default=60),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['target_group_arn', 'target_group_name']],
+ )
+
+ try:
+ connection = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ register_target(connection, module)
+ else:
+ deregister_target(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group.py b/ansible_collections/community/aws/plugins/modules/elb_target_group.py
new file mode 100644
index 000000000..45a6e7ae9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_group.py
@@ -0,0 +1,992 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_target_group
+version_added: 1.0.0
+short_description: Manage a target group for an Application or Network load balancer
+description:
+ - Manage an AWS Elastic Load Balancer target group. See
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or
+ U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details.
+author:
+ - "Rob White (@wimnat)"
+options:
+ deregistration_delay_timeout:
+ description:
+      - The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ The range is 0-3600 seconds.
+ type: int
+ deregistration_connection_termination:
+ description:
+ - Indicates whether the load balancer terminates connections at the end of the deregistration timeout.
+ - Using this option is only supported when attaching to a Network Load Balancer (NLB).
+ type: bool
+ default: false
+ required: false
+ version_added: 3.1.0
+ health_check_protocol:
+ description:
+ - The protocol the load balancer uses when performing health checks on targets.
+ required: false
+ choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ type: str
+ health_check_port:
+ description:
+      - The port the load balancer uses when performing health checks on targets.
+        Can be set to C(traffic-port) to match the target port.
+      - When not defined, it defaults to the port on which each target receives traffic from the load balancer.
+ required: false
+ type: str
+ health_check_path:
+ description:
+ - The ping path that is the destination on the targets for health checks. The path must be defined in order to set a health check.
+ - Requires the I(health_check_protocol) parameter to be set.
+ required: false
+ type: str
+ health_check_interval:
+ description:
+ - The approximate amount of time, in seconds, between health checks of an individual target.
+ required: false
+ type: int
+ health_check_timeout:
+ description:
+ - The amount of time, in seconds, during which no response from a target means a failed health check.
+ required: false
+ type: int
+ healthy_threshold_count:
+ description:
+      - The number of consecutive health check successes required before considering an unhealthy target healthy.
+ required: false
+ type: int
+ modify_targets:
+ description:
+      - Whether or not to alter existing targets in the group to match what is passed with the module.
+ required: false
+ default: true
+ type: bool
+ name:
+ description:
+ - The name of the target group.
+ required: true
+ type: str
+ port:
+ description:
+ - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target.
+ - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb).
+ required: false
+ type: int
+ protocol:
+ description:
+ - The protocol to use for routing traffic to the targets.
+ - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb).
+ required: false
+ choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ type: str
+ protocol_version:
+ description:
+ - Specifies protocol version.
+ - The protocol_version parameter is immutable and cannot be changed when updating an elb_target_group.
+ required: false
+ choices: ['GRPC', 'HTTP1', 'HTTP2']
+ type: str
+ version_added: 5.1.0
+ state:
+ description:
+ - Create or destroy the target group.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ stickiness_enabled:
+ description:
+ - Indicates whether sticky sessions are enabled.
+ type: bool
+ stickiness_lb_cookie_duration:
+ description:
+ - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load
+ balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds).
+ type: int
+ stickiness_app_cookie_duration:
+ description:
+ - The time period, in seconds, during which requests from a client
+ should be routed to the same target. After this time period expires,
+ the application-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds).
+ type: int
+ version_added: 1.5.0
+ stickiness_app_cookie_name:
+ description:
+ - The name of the application cookie. Required if I(stickiness_type=app_cookie).
+ type: str
+ version_added: 1.5.0
+ stickiness_type:
+ description:
+ - The type of sticky sessions.
+ - Valid values are C(lb_cookie), C(app_cookie) or C(source_ip).
+ - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers.
+ type: str
+ load_balancing_algorithm_type:
+ description:
+ - The type of load balancing algorithm to use.
+ - Changing the load balancing algorithm is only supported when used with Application Load Balancers (ALB).
+ - If not set AWS will default to C(round_robin).
+ choices: ['round_robin', 'least_outstanding_requests']
+ type: str
+ version_added: 3.2.0
+ successful_response_codes:
+ description:
+ - The HTTP codes to use when checking for a successful response from a target.
+ - Accepts multiple values (for example, "200,202") or a range of values (for example, "200-299").
+ - Requires the I(health_check_protocol) parameter to be set.
+ required: false
+ type: str
+ target_type:
+ description:
+ - The type of target that you must specify when registering targets with this target group. The possible values are
+ C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address), C(lambda) (target is specified by ARN),
+ or C(alb) (target is specified by ARN).
+ Note that you can't specify targets for a target group using more than one type. Target types lambda and alb only accept one target. When more than
+ one target is specified, only the first one is used. All additional targets are ignored.
+ If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target
+ group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10).
+ You can't specify publicly routable IP addresses.
+ - The default behavior is C(instance).
+ required: false
+ choices: ['instance', 'ip', 'lambda', 'alb']
+ type: str
+ targets:
+ description:
+      - A list of targets to assign to the target group. This parameter defaults to an empty list. Unless I(modify_targets=false), any existing
+        targets not in this list will be removed from the group. Each target in the list is a dict with an C(Id) and an optional C(Port) key.
+        See the Examples for detail.
+ required: false
+ type: list
+ elements: dict
+ unhealthy_threshold_count:
+ description:
+ - The number of consecutive health check failures required before considering a target unhealthy.
+ required: false
+ type: int
+ vpc_id:
+ description:
+ - The identifier of the virtual private cloud (VPC).
+ - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb).
+ required: false
+ type: str
+ preserve_client_ip_enabled:
+ description:
+ - Indicates whether client IP preservation is enabled.
+ - The default is disabled if the target group type is C(ip) address and the target group protocol is C(tcp) or C(tls).
+ Otherwise, the default is enabled. Client IP preservation cannot be disabled for C(udp) and C(tcp_udp) target groups.
+ - I(preserve_client_ip_enabled) is supported only by Network Load Balancers.
+ type: bool
+ required: false
+ version_added: 2.1.0
+ proxy_protocol_v2_enabled:
+ description:
+ - Indicates whether Proxy Protocol version 2 is enabled.
+ - The value is C(true) or C(false).
+ - I(proxy_protocol_v2_enabled) is supported only by Network Load Balancers.
+ type: bool
+ required: false
+ version_added: 2.1.0
+  wait:
+    description:
+      - Whether or not to wait for the targets in the target group to reach their final state
+        (C(healthy) after registration, C(unused) after deregistration).
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - The maximum time, in seconds, to wait for the targets to reach the expected state.
+    default: 200
+    type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+
+notes:
+  - Once a target group has been created, only its health check can be modified by subsequent calls.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a target group with a default health check
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 80
+ vpc_id: vpc-01234567
+ state: present
+
+- name: Create a target group with protocol_version 'GRPC'
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 80
+ vpc_id: vpc-01234567
+ protocol_version: GRPC
+ state: present
+
+- name: Modify the target group with a custom health check
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 80
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /health_check
+ health_check_port: 80
+ successful_response_codes: 200
+ health_check_interval: 15
+ health_check_timeout: 3
+ healthy_threshold_count: 4
+ unhealthy_threshold_count: 3
+ state: present
+
+- name: Delete a target group
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ state: absent
+
+- name: Create a target group with instance targets
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 81
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /
+ successful_response_codes: "200,250-260"
+ targets:
+ - Id: i-01234567
+ Port: 80
+ - Id: i-98765432
+ Port: 80
+ state: present
+ wait_timeout: 200
+ wait: True
+
+- name: Create a target group with IP address targets
+ community.aws.elb_target_group:
+ name: mytargetgroup
+ protocol: http
+ port: 81
+ vpc_id: vpc-01234567
+ health_check_protocol: http
+ health_check_path: /
+ successful_response_codes: "200,250-260"
+ target_type: ip
+ targets:
+ - Id: 10.0.0.10
+ Port: 80
+ AvailabilityZone: all
+ - Id: 10.0.0.20
+ Port: 80
+ state: present
+ wait_timeout: 200
+ wait: True
+
+# Using lambda targets requires that the target group itself is
+# allowed to invoke the lambda function. Therefore you first need to
+# create an empty target group to obtain its ARN, then allow the
+# target group to invoke the lambda function, and finally add the
+# target to the target group.
+- name: first, create empty target group
+ community.aws.elb_target_group:
+ name: my-lambda-targetgroup
+ target_type: lambda
+ state: present
+ modify_targets: False
+ register: out
+
+- name: second, allow invoke of the lambda
+ community.aws.lambda_policy:
+ state: "{{ state | default('present') }}"
+ function_name: my-lambda-function
+ statement_id: someID
+ action: lambda:InvokeFunction
+ principal: elasticloadbalancing.amazonaws.com
+ source_arn: "{{ out.target_group_arn }}"
+
+- name: third, add target
+ community.aws.elb_target_group:
+ name: my-lambda-targetgroup
+ target_type: lambda
+ state: present
+ targets:
+ - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
+
+'''
+
+RETURN = r'''
+deregistration_delay_timeout_seconds:
+  description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ returned: when state present
+ type: int
+ sample: 300
+deregistration_connection_termination:
+ description: Indicates whether the load balancer terminates connections at the end of the deregistration timeout.
+ returned: when state present
+ type: bool
+ sample: True
+health_check_interval_seconds:
+ description: The approximate amount of time, in seconds, between health checks of an individual target.
+ returned: when state present
+ type: int
+ sample: 30
+health_check_path:
+ description: The destination for the health check request.
+ returned: when state present
+ type: str
+ sample: /index.html
+health_check_port:
+ description: The port to use to connect with the target.
+ returned: when state present
+ type: str
+ sample: traffic-port
+health_check_protocol:
+ description: The protocol to use to connect with the target.
+ returned: when state present
+ type: str
+ sample: HTTP
+health_check_timeout_seconds:
+ description: The amount of time, in seconds, during which no response means a failed health check.
+ returned: when state present
+ type: int
+ sample: 5
+healthy_threshold_count:
+  description: The number of consecutive health check successes required before considering an unhealthy target healthy.
+ returned: when state present
+ type: int
+ sample: 5
+load_balancer_arns:
+ description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+ returned: when state present
+ type: list
+ sample: []
+matcher:
+ description: The HTTP codes to use when checking for a successful response from a target.
+ returned: when state present
+ type: dict
+ sample: {
+ "http_code": "200"
+ }
+port:
+ description: The port on which the targets are listening.
+ returned: when state present
+ type: int
+ sample: 80
+protocol:
+ description: The protocol to use for routing traffic to the targets.
+ returned: when state present
+ type: str
+ sample: HTTP
+stickiness_enabled:
+ description: Indicates whether sticky sessions are enabled.
+ returned: when state present
+ type: bool
+ sample: true
+stickiness_lb_cookie_duration_seconds:
+ description: The time period, in seconds, during which requests from a client should be routed to the same target.
+ returned: when state present
+ type: int
+ sample: 86400
+stickiness_type:
+ description: The type of sticky sessions.
+ returned: when state present
+ type: str
+ sample: lb_cookie
+load_balancing_algorithm_type:
+  description: The type of load balancing algorithm used.
+ returned: when state present
+ type: str
+ version_added: 3.2.0
+ sample: least_outstanding_requests
+tags:
+ description: The tags attached to the target group.
+ returned: when state present
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state present
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211"
+target_group_name:
+ description: The name of the target group.
+ returned: when state present
+ type: str
+ sample: mytargetgroup
+unhealthy_threshold_count:
+ description: The number of consecutive health check failures required before considering the target unhealthy.
+ returned: when state present
+ type: int
+ sample: 2
+vpc_id:
+ description: The ID of the VPC for the targets.
+ returned: when state present
+ type: str
+ sample: vpc-0123456
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+def get_tg_attributes(connection, module, tg_arn):
+ try:
+ _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True)
+ tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansible friendly
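+    # e.g. {'deregistration_delay.timeout_seconds': '300'} is returned by this
+    # helper as {'deregistration_delay_timeout_seconds': '300'} (illustrative value).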
+ return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items())
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+ try:
+ _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True)
+ return _tags['TagDescriptions'][0]['Tags']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group tags")
+
+
+def get_target_group(connection, module, retry_missing=False):
+ extra_codes = ['TargetGroupNotFound'] if retry_missing else []
+ try:
+ target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")])
+ jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes)
+ result = jittered_retry(target_group_paginator.build_full_result)()
+ except is_boto3_error_code('TargetGroupNotFound'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get target group")
+
+ return result['TargetGroups'][0]
+
+
+def wait_for_status(connection, module, target_group_arn, targets, status):
+    polling_increment_secs = 5
+    max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+    status_achieved = False
+    response = None
+
+    for _ in range(max_retries):
+ try:
+ response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True)
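+            # Note: only the first described target's health is checked; with
+            # multiple targets, completion is judged on that first entry only.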
+ if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe target health")
+
+ result = response
+ return status_achieved, result
+
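+
+# Editor's note: an illustrative alternative to the polling loop above, using
+# the waiters boto3 ships for ELBv2 ('target_in_service' / 'target_deregistered').
+# This sketch is not wired into the module; it assumes a plain boto3 elbv2 client.
+def _example_wait_with_boto3_waiter(client, target_group_arn, targets):
+    # Blocks until every target is in service; raises WaiterError on timeout.
+    waiter = client.get_waiter('target_in_service')
+    waiter.wait(TargetGroupArn=target_group_arn, Targets=targets)
+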
+
+def create_or_update_attributes(connection, module, target_group, new_target_group):
+ changed = False
+ target_type = module.params.get("target_type")
+ deregistration_delay_timeout = module.params.get("deregistration_delay_timeout")
+ deregistration_connection_termination = module.params.get("deregistration_connection_termination")
+ stickiness_enabled = module.params.get("stickiness_enabled")
+ stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration")
+ stickiness_type = module.params.get("stickiness_type")
+ stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration")
+ stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name")
+ preserve_client_ip_enabled = module.params.get("preserve_client_ip_enabled")
+ proxy_protocol_v2_enabled = module.params.get("proxy_protocol_v2_enabled")
+ load_balancing_algorithm_type = module.params.get("load_balancing_algorithm_type")
+
+ # Now set target group attributes
+ update_attributes = []
+
+ # Get current attributes
+ current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn'])
+
+ if deregistration_delay_timeout is not None:
+ if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']:
+ update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)})
+ if deregistration_connection_termination is not None:
+ if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true":
+ update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'})
+ if stickiness_enabled is not None:
+ if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true":
+ update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'})
+ if stickiness_lb_cookie_duration is not None:
+ if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']:
+ update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)})
+ if stickiness_type is not None:
+ if stickiness_type != current_tg_attributes.get('stickiness_type'):
+ update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type})
+ if stickiness_app_cookie_name is not None:
+ if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'):
+ update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)})
+ if stickiness_app_cookie_duration is not None:
+ if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']:
+ update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)})
+ if preserve_client_ip_enabled is not None:
+ if target_type not in ('udp', 'tcp_udp'):
+ if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'):
+ update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()})
+ if proxy_protocol_v2_enabled is not None:
+ if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'):
+ update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()})
+ if load_balancing_algorithm_type is not None:
+ if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']:
+ update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)})
+
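+    # For example, enabling stickiness while changing the deregistration delay
+    # to 300 seconds produces (illustrative values):
+    #   update_attributes = [
+    #       {'Key': 'stickiness.enabled', 'Value': 'true'},
+    #       {'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'},
+    #   ]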
+ if update_attributes:
+ try:
+ connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state
+ if new_target_group:
+ connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True)
+            module.fail_json_aws(e, msg="Couldn't set target group attributes")
+
+ return changed
+
+
+def create_or_update_target_group(connection, module):
+
+ changed = False
+ new_target_group = False
+ params = dict()
+ target_type = module.params.get("target_type")
+ params['Name'] = module.params.get("name")
+ params['TargetType'] = target_type
+ if target_type != "lambda":
+ params['Protocol'] = module.params.get("protocol").upper()
+ if module.params.get('protocol_version') is not None:
+ params['ProtocolVersion'] = module.params.get('protocol_version')
+ params['Port'] = module.params.get("port")
+ params['VpcId'] = module.params.get("vpc_id")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+
+ health_option_keys = [
+ "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout",
+ "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes"
+ ]
+ health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys)
+
+ # Set health check if anything set
+ if health_options:
+
+ if module.params.get("health_check_protocol") is not None:
+ params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper()
+
+ if module.params.get("health_check_port") is not None:
+ params['HealthCheckPort'] = module.params.get("health_check_port")
+
+ if module.params.get("health_check_interval") is not None:
+ params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval")
+
+ if module.params.get("health_check_timeout") is not None:
+ params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout")
+
+ if module.params.get("healthy_threshold_count") is not None:
+ params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count")
+
+ if module.params.get("unhealthy_threshold_count") is not None:
+ params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count")
+
+ # Only need to check response code and path for http(s) health checks
+ protocol = module.params.get("health_check_protocol")
+ if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']:
+
+ if module.params.get("health_check_path") is not None:
+ params['HealthCheckPath'] = module.params.get("health_check_path")
+
+ if module.params.get("successful_response_codes") is not None:
+ params['Matcher'] = {}
+ code_key = 'HttpCode'
+ protocol_version = module.params.get('protocol_version')
+ if protocol_version is not None and protocol_version.upper() == "GRPC":
+ code_key = 'GrpcCode'
+ params['Matcher'][code_key] = module.params.get("successful_response_codes")
+
+ # Get target group
+ target_group = get_target_group(connection, module)
+
+ if target_group:
+ diffs = [param for param in ('Port', 'Protocol', 'VpcId')
+ if target_group.get(param) != params.get(param)]
+ if diffs:
+ module.fail_json(msg="Cannot modify %s parameter(s) for a target group" %
+ ", ".join(diffs))
+ # Target group exists so check health check parameters match what has been passed
+ health_check_params = dict()
+
+ # Modify health check if anything set
+ if health_options:
+
+ # Health check protocol
+ if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']:
+ health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol']
+
+ # Health check port
+ if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']:
+ health_check_params['HealthCheckPort'] = params['HealthCheckPort']
+
+ # Health check interval
+ if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']:
+ health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds']
+
+ # Health check timeout
+ if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']:
+ health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds']
+
+ # Healthy threshold
+ if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']:
+ health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount']
+
+ # Unhealthy threshold
+ if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']:
+ health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount']
+
+ # Only need to check response code and path for http(s) health checks
+ if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']:
+ # Health check path
+ if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']:
+ health_check_params['HealthCheckPath'] = params['HealthCheckPath']
+
+ # Matcher (successful response codes)
+ # TODO: required and here?
+ if "Matcher" in params:
+ code_key = "HttpCode"
+ if target_group.get("ProtocolVersion") == "GRPC":
+ code_key = "GrpcCode"
+ current_matcher_list = target_group["Matcher"][code_key].split(",")
+ requested_matcher_list = params["Matcher"][code_key].split(",")
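+                    # The comparison is order-insensitive: '200,202' and
+                    # '202,200' are treated as the same matcher.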
+ if set(current_matcher_list) != set(requested_matcher_list):
+ health_check_params['Matcher'] = {}
+ health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list)
+
+ try:
+ if health_check_params:
+ connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update target group")
+
+ # Do we need to modify targets?
+ if module.params.get("modify_targets"):
+        # Get the list of current target instances. The API has no dedicated
+        # "describe targets" call, so describe_target_health seems to be the
+        # only way to list them.
+ try:
+ current_targets = connection.describe_target_health(
+ TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get target group health")
+
+ if module.params.get("targets"):
+
+ if target_type != "lambda":
+ params['Targets'] = module.params.get("targets")
+
+ # Correct type of target ports
+ for target in params['Targets']:
+ target['Port'] = int(target.get('Port', module.params.get('port')))
+
+ current_instance_ids = []
+
+ for instance in current_targets['TargetHealthDescriptions']:
+ current_instance_ids.append(instance['Target']['Id'])
+
+ new_instance_ids = []
+ for instance in params['Targets']:
+ new_instance_ids.append(instance['Id'])
+
+ add_instances = set(new_instance_ids) - set(current_instance_ids)
+
+ if add_instances:
+ instances_to_add = []
+ for target in params["Targets"]:
+ if target["Id"] in add_instances:
+ tmp_item = {"Id": target["Id"], "Port": target["Port"]}
+ if target.get("AvailabilityZone"):
+ tmp_item["AvailabilityZone"] = target["AvailabilityZone"]
+ instances_to_add.append(tmp_item)
+
+ changed = True
+ try:
+ connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't register targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(
+ connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy')
+ if not status_achieved:
+ module.fail_json(
+ msg='Error waiting for target registration to be healthy - please check the AWS console')
+
+ remove_instances = set(current_instance_ids) - set(new_instance_ids)
+
+ if remove_instances:
+ instances_to_remove = []
+ for target in current_targets['TargetHealthDescriptions']:
+ if target['Target']['Id'] in remove_instances:
+ instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+
+ changed = True
+ try:
+ connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(
+ connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused')
+ if not status_achieved:
+ module.fail_json(
+ msg='Error waiting for target deregistration - please check the AWS console')
+
+ # register lambda target
+ else:
+ try:
+                # Don't clobber 'changed' set by earlier health check updates;
+                # track whether the lambda target needs registering separately.
+                register_needed = False
+                target = module.params.get("targets")[0]
+                if len(current_targets["TargetHealthDescriptions"]) == 0:
+                    register_needed = True
+                else:
+                    for item in current_targets["TargetHealthDescriptions"]:
+                        if target["Id"] != item["Target"]["Id"]:
+                            register_needed = True
+                            break  # only one target is possible with lambda
+
+                if register_needed and target.get("Id"):
+                    connection.register_targets(
+                        TargetGroupArn=target_group['TargetGroupArn'],
+                        Targets=[{"Id": target['Id']}],
+                        aws_retry=True,
+                    )
+                    changed = True
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't register targets")
+ else:
+ if target_type != "lambda":
+
+ current_instances = current_targets['TargetHealthDescriptions']
+
+ if current_instances:
+ instances_to_remove = []
+ for target in current_targets['TargetHealthDescriptions']:
+ instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+
+ changed = True
+ try:
+ connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(
+ connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused')
+ if not status_achieved:
+ module.fail_json(
+ msg='Error waiting for target deregistration - please check the AWS console')
+
+ # remove lambda targets
+ else:
+                # Don't clobber 'changed' set by earlier health check updates.
+                if current_targets["TargetHealthDescriptions"]:
+                    # only one target is possible with lambda
+                    target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"]
+                    connection.deregister_targets(
+                        TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True)
+                    changed = True
+ else:
+ try:
+ connection.create_target_group(aws_retry=True, **params)
+ changed = True
+ new_target_group = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create target group")
+
+ target_group = get_target_group(connection, module, retry_missing=True)
+
+ if module.params.get("targets"):
+ if target_type != "lambda":
+ params['Targets'] = module.params.get("targets")
+ try:
+ connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't register targets")
+
+ if module.params.get("wait"):
+ status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy')
+ if not status_achieved:
+ module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
+
+ else:
+ try:
+ target = module.params.get("targets")[0]
+ response = connection.register_targets(
+ TargetGroupArn=target_group['TargetGroupArn'],
+ Targets=[
+ {
+ "Id": target["Id"]
+ }
+ ],
+ aws_retry=True
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't register targets")
+
+ attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group)
+
+ if attributes_update:
+ changed = True
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if tags is not None:
+ # Get tags
+ current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn'])
+
+ # Delete necessary tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags)
+ if tags_to_delete:
+ try:
+ connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags from target group")
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add tags to target group")
+ changed = True
+
+ # Get the target group again
+ target_group = get_target_group(connection, module)
+
+ # Get the target group attributes again
+ target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn']))
+
+ # Convert target_group to snake_case
+ snaked_tg = camel_dict_to_snake_dict(target_group)
+
+ snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn']))
+
+ module.exit_json(changed=changed, **snaked_tg)
+
+
+def delete_target_group(connection, module):
+ changed = False
+ tg = get_target_group(connection, module)
+
+ if tg:
+ try:
+ connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete target group")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP',
+ 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ argument_spec = dict(
+ deregistration_delay_timeout=dict(type='int'),
+ deregistration_connection_termination=dict(type='bool', default=False),
+ health_check_protocol=dict(choices=protocols_list),
+ health_check_port=dict(),
+ health_check_path=dict(),
+ health_check_interval=dict(type='int'),
+ health_check_timeout=dict(type='int'),
+ healthy_threshold_count=dict(type='int'),
+ modify_targets=dict(default=True, type='bool'),
+ name=dict(required=True),
+ port=dict(type='int'),
+ protocol=dict(choices=protocols_list),
+ protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']),
+ purge_tags=dict(default=True, type='bool'),
+ stickiness_enabled=dict(type='bool'),
+ stickiness_type=dict(),
+ stickiness_lb_cookie_duration=dict(type='int'),
+ stickiness_app_cookie_duration=dict(type='int'),
+ stickiness_app_cookie_name=dict(),
+ load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']),
+ state=dict(required=True, choices=['present', 'absent']),
+ successful_response_codes=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']),
+ targets=dict(type='list', elements='dict'),
+ unhealthy_threshold_count=dict(type='int'),
+ vpc_id=dict(),
+ preserve_client_ip_enabled=dict(type='bool'),
+ proxy_protocol_v2_enabled=dict(type='bool'),
+ wait_timeout=dict(type='int', default=200),
+ wait=dict(type='bool', default=False)
+ )
+ required_by = dict(
+ health_check_path=['health_check_protocol'],
+ successful_response_codes=['health_check_protocol'],
+ )
+ required_if = [
+ ['target_type', 'instance', ['protocol', 'port', 'vpc_id']],
+ ['target_type', 'ip', ['protocol', 'port', 'vpc_id']],
+ ['target_type', 'alb', ['protocol', 'port', 'vpc_id']],
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if)
+
+ if module.params.get('target_type') is None:
+ module.params['target_type'] = 'instance'
+
+ connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ if module.params.get('state') == 'present':
+ if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None):
+ module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination")
+
+ create_or_update_target_group(connection, module)
+ else:
+ delete_target_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
new file mode 100644
index 000000000..86cc03782
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_target_group_info
+version_added: 1.0.0
+short_description: Gather information about ELB target groups in AWS
+description:
+  - Gather information about ELB target groups in AWS.
+author: Rob White (@wimnat)
+options:
+ load_balancer_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the load balancer.
+ required: false
+ type: str
+ target_group_arns:
+ description:
+ - The Amazon Resource Names (ARN) of the target groups.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the target groups.
+ required: false
+ type: list
+ elements: str
+ collect_targets_health:
+ description:
+      - When set to C(true), the output contains the targets health description.
+ required: false
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all target groups
+ community.aws.elb_target_group_info:
+
+- name: Gather information about the target group attached to a particular ELB
+ community.aws.elb_target_group_info:
+ load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/aabbccddeeff"
+
+- name: Gather information about target groups named 'tg1' and 'tg2'
+ community.aws.elb_target_group_info:
+ names:
+ - tg1
+ - tg2
+
+'''
+
+RETURN = r'''
+target_groups:
+  description: A list of target groups.
+ returned: always
+ type: complex
+ contains:
+ deregistration_delay_timeout_seconds:
+      description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+ returned: always
+ type: int
+ sample: 300
+ health_check_interval_seconds:
+ description: The approximate amount of time, in seconds, between health checks of an individual target.
+ returned: always
+ type: int
+ sample: 30
+ health_check_path:
+ description: The destination for the health check request.
+ returned: always
+ type: str
+ sample: /index.html
+ health_check_port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: str
+ sample: traffic-port
+ health_check_protocol:
+ description: The protocol to use to connect with the target.
+ returned: always
+ type: str
+ sample: HTTP
+ health_check_timeout_seconds:
+ description: The amount of time, in seconds, during which no response means a failed health check.
+ returned: always
+ type: int
+ sample: 5
+ healthy_threshold_count:
+      description: The number of consecutive health check successes required before considering an unhealthy target healthy.
+ returned: always
+ type: int
+ sample: 5
+ load_balancer_arns:
+ description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+ returned: always
+ type: list
+ sample: []
+ matcher:
+ description: The HTTP codes to use when checking for a successful response from a target.
+ returned: always
+ type: dict
+ sample: {
+ "http_code": "200"
+ }
+ port:
+ description: The port on which the targets are listening.
+ returned: always
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol to use for routing traffic to the targets.
+ returned: always
+ type: str
+ sample: HTTP
+ stickiness_enabled:
+ description: Indicates whether sticky sessions are enabled.
+ returned: always
+ type: bool
+ sample: true
+ stickiness_lb_cookie_duration_seconds:
+      description: The time period, in seconds, during which requests from a client should be routed to the same target.
+ returned: always
+ type: int
+ sample: 86400
+ stickiness_type:
+ description: The type of sticky sessions.
+ returned: always
+ type: str
+ sample: lb_cookie
+ tags:
+ description: The tags attached to the target group.
+ returned: always
+ type: dict
+ sample: "{
+ 'Tag': 'Example'
+ }"
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: always
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211"
+ targets_health_description:
+ description: Targets health description.
+ returned: when collect_targets_health is enabled
+ type: complex
+ contains:
+ health_check_port:
+ description: The port to check target health.
+ returned: always
+ type: str
+ sample: '80'
+ target:
+ description: The target metadata.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: The ID of the target.
+ returned: always
+ type: str
+ sample: i-0123456789
+ port:
+ description: The port to use to connect with the target.
+ returned: always
+ type: int
+ sample: 80
+ target_health:
+ description: The target health status.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the target health.
+ returned: always
+ type: str
+ sample: healthy
+ target_group_name:
+ description: The name of the target group.
+ returned: always
+ type: str
+ sample: mytargetgroup
+ unhealthy_threshold_count:
+ description: The number of consecutive health check failures required before considering the target unhealthy.
+ returned: always
+ type: int
+ sample: 2
+ vpc_id:
+ description: The ID of the VPC for the targets.
+ returned: always
+ type: str
+ sample: vpc-0123456
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict
+
+
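+# Note: 'client' and 'module' used by the helpers below are module-level
+# globals, assigned in main().
+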
+@AWSRetry.jittered_backoff(retries=10)
+def get_paginator(**kwargs):
+ paginator = client.get_paginator('describe_target_groups')
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+def get_target_group_attributes(target_group_arn):
+
+ try:
+ target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe target group attributes")
+
+    # Replace '.' with '_' in attribute key names to make it more Ansible friendly
+ return dict((k.replace('.', '_'), v)
+ for (k, v) in target_group_attributes.items())
+
+
+def get_target_group_tags(target_group_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe target group tags")
+
+
+def get_target_group_targets_health(target_group_arn):
+
+ try:
+ return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get target health")
+
+
+def list_target_groups():
+
+ load_balancer_arn = module.params.get("load_balancer_arn")
+ target_group_arns = module.params.get("target_group_arns")
+ names = module.params.get("names")
+ collect_targets_health = module.params.get("collect_targets_health")
+
+ try:
+        if not load_balancer_arn and not target_group_arns and not names:
+            target_groups = get_paginator()
+        elif load_balancer_arn:
+            target_groups = get_paginator(LoadBalancerArn=load_balancer_arn)
+        elif target_group_arns:
+            target_groups = get_paginator(TargetGroupArns=target_group_arns)
+        elif names:
+            target_groups = get_paginator(Names=names)
+ except is_boto3_error_code('TargetGroupNotFound'):
+ module.exit_json(target_groups=[])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list target groups")
+
+ # Get the attributes and tags for each target group
+ for target_group in target_groups['TargetGroups']:
+ target_group.update(get_target_group_attributes(target_group['TargetGroupArn']))
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
+
+ # Get tags for each target group
+ for snaked_target_group in snaked_target_groups:
+ snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn'])
+ if collect_targets_health:
+ snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
+ target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])]
+
+ module.exit_json(target_groups=snaked_target_groups)
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ load_balancer_arn=dict(type='str'),
+ target_group_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str'),
+ collect_targets_health=dict(default=False, type='bool', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_target_groups()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_info.py
new file mode 100644
index 000000000..4f91ac7f3
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_info.py
@@ -0,0 +1,430 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_target_info
+version_added: 1.0.0
+short_description: Gather which target groups a target is associated with
+description:
+ - This module will search through every target group in a region to find
+ which ones have registered a given instance ID or IP.
+
+author: "Yaakov Kuperman (@yaakov-github)"
+options:
+ instance_id:
+ description:
+ - What instance ID to get information for.
+ type: str
+ required: true
+ get_unused_target_groups:
+ description:
+ - Whether or not to get target groups not used by any load balancers.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = """
+# practical use case - dynamically de-registering and re-registering nodes
+
+ - name: Get EC2 Metadata
+ amazon.aws.ec2_metadata_facts:
+
+ - name: Get initial list of target groups
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+
+ - name: save fact for later
+ ansible.builtin.set_fact:
+ original_tgs: "{{ target_info.instance_target_groups }}"
+
+ - name: Deregister instance from all target groups
+ delegate_to: localhost
+ community.aws.elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ region: "{{ ansible_ec2_placement_region }}"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # This avoids having to wait for 'elb_target' to serially deregister each
+ # target group. An alternative would be to run all of the 'elb_target'
+ # tasks async and wait for them to finish.
+
+ - name: wait for all targets to deregister simultaneously
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+ - name: reregister in elbv2s
+ community.aws.elb_target:
+ region: "{{ ansible_ec2_placement_region }}"
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # wait until all groups associated with this instance are 'healthy' or
+ # 'unused'
+ - name: wait for registration
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
+
+# using the target groups to generate AWS CLI commands to reregister the
+# instance - useful in case the playbook fails mid-run and manual
+# rollback is required
+ - name: "reregistration commands: ELBv2s"
+ ansible.builtin.debug:
+ msg: >
+ aws --region {{ansible_ec2_placement_region}} elbv2
+ register-targets --target-group-arn {{item.target_group_arn}}
+ --targets{%for target in item.targets%}
+ Id={{target.target_id}},
+ Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
+ {%endif%}
+ {%endfor%}
+ loop: "{{target_info.instance_target_groups}}"
+
+"""
+
+RETURN = """
+instance_target_groups:
+  description: A list of target groups with which the instance is registered.
+ returned: always
+ type: complex
+ contains:
+ target_group_arn:
+ description: The ARN of the target group
+ type: str
+ returned: always
+ sample:
+ - "arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/target-group/deadbeefdeadbeef"
+ target_group_type:
+ description: Which target type is used for this group
+ returned: always
+ type: str
+ sample:
+ - ip
+ - instance
+ targets:
+ description: A list of targets that point to this instance ID
+ returned: always
+ type: complex
+ contains:
+ target_id:
+ description: the target ID referring to this instance
+ type: str
+ returned: always
+ sample:
+ - i-deadbeef
+ - 1.2.3.4
+ target_port:
+ description: which port this target is listening on
+ type: str
+ returned: always
+ sample:
+ - 80
+ target_az:
+ description: which availability zone is explicitly
+ associated with this target
+ type: str
+ returned: when an AZ is associated with this instance
+ sample:
+ - us-west-2a
+ target_health:
+ description:
+ - The target health description.
+ - See following link for all the possible values
+ U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health)
+ returned: always
+ type: complex
+ contains:
+ description:
+ description: description of target health
+ returned: if I(state!=present)
+ sample:
+ - "Target desregistration is in progress"
+ type: str
+ reason:
+ description: reason code for target health
+ returned: if I(state!=healthy)
+ sample:
+ - "Target.Deregistration in progress"
+ type: str
+ state:
+ description: health state
+ returned: always
+ sample:
+ - "healthy"
+ - "draining"
+ - "initial"
+ - "unhealthy"
+ - "unused"
+ - "unavailable"
+ type: str
+"""
+
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ # we can handle the lack of boto3 based on the ec2 module
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+class Target(object):
+ """Models a target in a target group"""
+ def __init__(self, target_id, port, az, raw_target_health):
+ self.target_port = port
+ self.target_id = target_id
+ self.target_az = az
+ self.target_health = self.convert_target_health(raw_target_health)
+
+ def convert_target_health(self, raw_target_health):
+ return camel_dict_to_snake_dict(raw_target_health)
+
+
+class TargetGroup(object):
+ """Models an elbv2 target group"""
+
+ def __init__(self, **kwargs):
+ self.target_group_type = kwargs["target_group_type"]
+ self.target_group_arn = kwargs["target_group_arn"]
+ # the relevant targets associated with this group
+ self.targets = []
+
+ def add_target(self, target_id, target_port, target_az, raw_target_health):
+ self.targets.append(Target(target_id,
+ target_port,
+ target_az,
+ raw_target_health))
+
+ def to_dict(self):
+ object_dict = vars(self)
+ object_dict["targets"] = [vars(each) for each in self.get_targets()]
+ return object_dict
+
+ def get_targets(self):
+ return list(self.targets)
+
+
+class TargetInfoGatherer(object):
+
+ def __init__(self, module, instance_id, get_unused_target_groups):
+ self.module = module
+ try:
+ self.ec2 = self.module.client(
+ "ec2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json_aws(e,
+ msg="Couldn't connect to ec2"
+ )
+
+ try:
+ self.elbv2 = self.module.client(
+ "elbv2",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not connect to elbv2"
+ )
+
+ self.instance_id = instance_id
+ self.get_unused_target_groups = get_unused_target_groups
+ self.tgs = self._get_target_groups()
+
+ def _get_instance_ips(self):
+ """Fetch all IPs associated with this instance so that we can determine
+ whether or not an instance is in an IP-based target group"""
+ try:
+ # get ahold of the instance in the API
+ reservations = self.ec2.describe_instances(
+ InstanceIds=[self.instance_id],
+ aws_retry=True
+ )["Reservations"]
+ except (BotoCoreError, ClientError) as e:
+ # typically this will happen if the instance doesn't exist
+ self.module.fail_json_aws(e,
+ msg="Could not get instance info" +
+ " for instance '%s'" %
+ (self.instance_id)
+ )
+
+ if len(reservations) < 1:
+ self.module.fail_json(
+ msg="Instance ID %s could not be found" % self.instance_id
+ )
+
+ instance = reservations[0]["Instances"][0]
+
+ # IPs are represented in a few places in the API, this should
+ # account for all of them
+ ips = set()
+ ips.add(instance["PrivateIpAddress"])
+ for nic in instance["NetworkInterfaces"]:
+ ips.add(nic["PrivateIpAddress"])
+ for ip in nic["PrivateIpAddresses"]:
+ ips.add(ip["PrivateIpAddress"])
+
+ return list(ips)
+
+ def _get_target_group_objects(self):
+ """helper function to build a list of TargetGroup objects based on
+ the AWS API"""
+ try:
+ paginator = self.elbv2.get_paginator(
+ "describe_target_groups"
+ )
+ tg_response = paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target" +
+ " groups"
+ )
+
+ # build list of TargetGroup objects representing every target group in
+ # the system
+ target_groups = []
+ for each_tg in tg_response["TargetGroups"]:
+ if not self.get_unused_target_groups and \
+ len(each_tg["LoadBalancerArns"]) < 1:
+ # only collect target groups that actually are connected
+ # to LBs
+ continue
+
+ target_groups.append(
+ TargetGroup(target_group_arn=each_tg["TargetGroupArn"],
+ target_group_type=each_tg["TargetType"],
+ )
+ )
+ return target_groups
+
+ def _get_target_descriptions(self, target_groups):
+ """Helper function to build a list of all the target descriptions
+ for this target in a target group"""
+ # Build a list of all the target groups pointing to this instance
+ # based on the previous list
+ tgs = set()
+ # Loop through all the target groups
+ for tg in target_groups:
+ try:
+ # Get the list of targets for that target group
+ response = self.elbv2.describe_target_health(
+ TargetGroupArn=tg.target_group_arn,
+ aws_retry=True
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e,
+ msg="Could not describe target " +
+ "health for target group %s" %
+ tg.target_group_arn
+ )
+
+ for t in response["TargetHealthDescriptions"]:
+ # If the target group has this instance as a target, add to
+ # list. This logic also accounts for the possibility of a
+ # target being in the target group multiple times with
+ # overridden ports
+ if t["Target"]["Id"] == self.instance_id or \
+ t["Target"]["Id"] in self.instance_ips:
+
+ # The 'AvailabilityZone' parameter is a weird one, see the
+ # API docs for more. Basically it's only supposed to be
+ # there under very specific circumstances, so we need
+ # to account for that
+                    az = t["Target"].get("AvailabilityZone")
+
+ tg.add_target(t["Target"]["Id"],
+ t["Target"]["Port"],
+ az,
+ t["TargetHealth"])
+ # since tgs is a set, each target group will be added only
+ # once, even though we call add on each successful match
+ tgs.add(tg)
+ return list(tgs)
+
+ def _get_target_groups(self):
+ # do this first since we need the IPs later on in this function
+ self.instance_ips = self._get_instance_ips()
+
+ # build list of target groups
+ target_groups = self._get_target_group_objects()
+ return self._get_target_descriptions(target_groups)
+
+
+def main():
+ argument_spec = dict(
+ instance_id={"required": True, "type": "str"},
+ get_unused_target_groups={"required": False,
+ "default": True, "type": "bool"}
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ instance_id = module.params["instance_id"]
+ get_unused_target_groups = module.params["get_unused_target_groups"]
+
+ tg_gatherer = TargetInfoGatherer(module,
+ instance_id,
+ get_unused_target_groups
+ )
+
+ instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs]
+
+ module.exit_json(instance_target_groups=instance_target_groups)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/glue_connection.py b/ansible_collections/community/aws/plugins/modules/glue_connection.py
new file mode 100644
index 000000000..bcfacb171
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/glue_connection.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: glue_connection
+version_added: 1.0.0
+short_description: Manage an AWS Glue connection
+description:
+ - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_connection).
+ The usage did not change.
+author:
+ - "Rob White (@wimnat)"
+options:
+ availability_zone:
+ description:
+      - The Availability Zone used by the connection.
+ - Required when I(connection_type=NETWORK).
+ type: str
+ version_added: 1.5.0
+ catalog_id:
+ description:
+ - The ID of the Data Catalog in which to create the connection. If none is supplied,
+ the AWS account ID is used by default.
+ type: str
+ connection_properties:
+ description:
+ - A dict of key-value pairs used as parameters for this connection.
+ - Required when I(state=present).
+ type: dict
+ connection_type:
+ description:
+ - The type of the connection. Currently, SFTP is not supported.
+ default: JDBC
+ choices: [ 'CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK' ]
+ type: str
+ description:
+ description:
+ - The description of the connection.
+ type: str
+ match_criteria:
+ description:
+ - A list of UTF-8 strings that specify the criteria that you can use in selecting this connection.
+ type: list
+ elements: str
+ name:
+ description:
+ - The name of the connection.
+ required: true
+ type: str
+ security_groups:
+ description:
+ - A list of security groups to be used by the connection. Use either security group name or ID.
+ - Required when I(connection_type=NETWORK).
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete the AWS Glue connection.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnet_id:
+ description:
+ - The subnet ID used by the connection.
+ - Required when I(connection_type=NETWORK).
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue connection
+- community.aws.glue_connection:
+ name: my-glue-connection
+ connection_properties:
+ JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename
+ USERNAME: my-username
+ PASSWORD: my-password
+ state: present
+
+# Create an AWS Glue network connection
+- community.aws.glue_connection:
+ name: my-glue-network-connection
+ availability_zone: us-east-1a
+ connection_properties:
+ JDBC_ENFORCE_SSL: "false"
+ connection_type: NETWORK
+ description: Test connection
+ security_groups:
+ - sg-glue
+ subnet_id: subnet-123abc
+ state: present
+
+# Delete an AWS Glue connection
+- community.aws.glue_connection:
+ name: my-glue-connection
+ state: absent
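+
+# Create an AWS Glue connection in a non-default Data Catalog with match criteria
+# (an illustrative sketch - the catalog ID, URL, and criteria values are placeholders)
+- community.aws.glue_connection:
+    name: my-catalog-connection
+    catalog_id: "123456789012"
+    connection_properties:
+      JDBC_CONNECTION_URL: jdbc:postgresql://mydb:5432/databasename
+      USERNAME: my-username
+      PASSWORD: my-password
+    match_criteria:
+      - my-crawler-criteria
+    state: present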
+'''
+
+RETURN = r'''
+connection_properties:
+ description:
+ - (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection.
+ - This return key has been deprecated, and will be removed in a release after 2024-06-01.
+ returned: when state is present
+ type: dict
+ sample: {'jdbc_connection_url':'jdbc:mysql://mydb:3306/databasename','username':'x','password':'y'}
+connection_type:
+ description: The type of the connection.
+ returned: when state is present
+ type: str
+ sample: JDBC
+creation_time:
+ description: The time this connection definition was created.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+description:
+    description: Description of the connection.
+ returned: when state is present
+ type: str
+ sample: My first Glue job
+last_updated_time:
+ description: The last time this connection definition was updated.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+match_criteria:
+ description: A list of criteria that can be used in selecting this connection.
+ returned: when state is present
+ type: list
+ sample: []
+name:
+ description: The name of the connection definition.
+ returned: when state is present
+ type: str
+ sample: my-glue-connection
+physical_connection_requirements:
+    description: A dict of physical connection requirements, such as VPC and SecurityGroup,
+        that are needed to make this connection successfully.
+ returned: when state is present
+ type: dict
+ sample: {'subnet-id':'subnet-aabbccddee'}
+raw_connection_properties:
+ description: A dict of key-value pairs used as parameters for this connection.
+ returned: when state is present
+ type: dict
+ sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
+'''
+
+# Non-ansible imports
+import copy
+import time
+try:
+ import botocore
+except ImportError:
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+
+
+def _get_glue_connection(connection, module):
+ """
+ Get an AWS Glue connection based on name. If not found, return None.
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :return: boto3 Glue connection dict or None if not found
+ """
+
+ connection_name = module.params.get("name")
+ connection_catalog_id = module.params.get("catalog_id")
+
+ params = {'Name': connection_name}
+ if connection_catalog_id is not None:
+ params['CatalogId'] = connection_catalog_id
+
+ try:
+ return connection.get_connection(aws_retry=True, **params)['Connection']
+ except is_boto3_error_code('EntityNotFoundException'):
+ return None
+
+
+def _compare_glue_connection_params(user_params, current_params):
+ """
+    Compare Glue connection params. If there is a difference, return True immediately; otherwise return False.
+
+ :param user_params: the Glue connection parameters passed by the user
+ :param current_params: the Glue connection parameters currently configured
+ :return: True if any parameter is mismatched else False
+ """
+
+ # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
+ # To counter this, add the key if it's missing with a blank value
+
+ if 'Description' not in current_params:
+ current_params['Description'] = ""
+ if 'MatchCriteria' not in current_params:
+ current_params['MatchCriteria'] = list()
+ if 'PhysicalConnectionRequirements' not in current_params:
+ current_params['PhysicalConnectionRequirements'] = dict()
+ current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = []
+ current_params['PhysicalConnectionRequirements']['SubnetId'] = ""
+
+ if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \
+ != current_params['ConnectionProperties']:
+ return True
+ if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \
+ != current_params['ConnectionType']:
+ return True
+ if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']:
+ return True
+ if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']):
+ return True
+ if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']:
+ if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
+ set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \
+ != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']):
+ return True
+ if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
+ user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \
+ != current_params['PhysicalConnectionRequirements']['SubnetId']:
+ return True
+        # 'AvailabilityZone' may be missing from the current params entirely,
+        # so compare with .get() to avoid a KeyError
+        if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
+                user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \
+                != current_params['PhysicalConnectionRequirements'].get('AvailabilityZone'):
+            return True
+
+ return False
+
+
+# Glue module doesn't appear to have any waiters, unlike EC2 or RDS
+def _await_glue_connection(connection, module):
+ start_time = time.time()
+ wait_timeout = start_time + 30
+ check_interval = 5
+
+ while wait_timeout > time.time():
+ glue_connection = _get_glue_connection(connection, module)
+ if glue_connection and glue_connection.get('Name'):
+ return glue_connection
+ time.sleep(check_interval)
+
+ module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name'))
+
+
+def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection):
+ """
+ Create or update an AWS Glue connection
+
+    :param connection: AWS boto3 glue connection
+    :param connection_ec2: AWS boto3 ec2 connection
+ :param module: Ansible module
+ :param glue_connection: a dict of AWS Glue connection parameters or None
+ :return:
+ """
+ changed = False
+
+ params = dict()
+ params['ConnectionInput'] = dict()
+ params['ConnectionInput']['Name'] = module.params.get("name")
+ params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type")
+ params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties")
+ if module.params.get("catalog_id") is not None:
+ params['CatalogId'] = module.params.get("catalog_id")
+ if module.params.get("description") is not None:
+ params['ConnectionInput']['Description'] = module.params.get("description")
+ if module.params.get("match_criteria") is not None:
+ params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria")
+ if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None:
+ params['ConnectionInput']['PhysicalConnectionRequirements'] = dict()
+ if module.params.get("security_groups") is not None:
+ # Get security group IDs from names
+ security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True)
+ params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids
+ if module.params.get("subnet_id") is not None:
+ params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id")
+ if module.params.get("availability_zone") is not None:
+ params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = module.params.get("availability_zone")
+
+ # If glue_connection is not None then check if it needs to be modified, else create it
+ if glue_connection:
+ if _compare_glue_connection_params(params, glue_connection):
+ try:
+ # We need to slightly modify the params for an update
+ update_params = copy.deepcopy(params)
+ update_params['Name'] = update_params['ConnectionInput']['Name']
+ if not module.check_mode:
+ connection.update_connection(aws_retry=True, **update_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+ else:
+ try:
+ if not module.check_mode:
+ connection.create_connection(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ # If changed, get the Glue connection again
+ if changed and not module.check_mode:
+ glue_connection = _await_glue_connection(connection, module)
+
+ if glue_connection:
+ module.deprecate("The 'connection_properties' return key is deprecated and will be replaced"
+ " by 'raw_connection_properties'. Both values are returned for now.",
+ date='2024-06-01', collection_name='community.aws')
+ glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties']
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties']))
+
+
+def delete_glue_connection(connection, module, glue_connection):
+ """
+ Delete an AWS Glue connection
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_connection: a dict of AWS Glue connection parameters or None
+ :return:
+ """
+ changed = False
+
+ params = {'ConnectionName': module.params.get("name")}
+ if module.params.get("catalog_id") is not None:
+ params['CatalogId'] = module.params.get("catalog_id")
+
+ if glue_connection:
+ try:
+ if not module.check_mode:
+ connection.delete_connection(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ availability_zone=dict(type='str'),
+ catalog_id=dict(type='str'),
+ connection_properties=dict(type='dict'),
+ connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']),
+ description=dict(type='str'),
+ match_criteria=dict(type='list', elements='str'),
+ name=dict(required=True, type='str'),
+ security_groups=dict(type='list', elements='str'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ subnet_id=dict(type='str')
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['connection_properties']),
+ ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id'])
+ ],
+ supports_check_mode=True
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ connection_glue = module.client('glue', retry_decorator=retry_decorator)
+ connection_ec2 = module.client('ec2', retry_decorator=retry_decorator)
+
+ glue_connection = _get_glue_connection(connection_glue, module)
+
+ if module.params.get("state") == 'present':
+ create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
+ else:
+ delete_glue_connection(connection_glue, module, glue_connection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/glue_crawler.py b/ansible_collections/community/aws/plugins/modules/glue_crawler.py
new file mode 100644
index 000000000..a47b8eb3f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/glue_crawler.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: glue_crawler
+version_added: 4.1.0
+short_description: Manage an AWS Glue crawler
+description:
+ - Manage an AWS Glue crawler. See U(https://aws.amazon.com/glue/) for details.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_crawler).
+ The usage did not change.
+author:
+ - 'Ivan Chekaldin (@ichekaldin)'
+options:
+ database_name:
+ description:
+ - The name of the database where results are written.
+ type: str
+ description:
+ description:
+ - Description of the crawler being defined.
+ type: str
+ name:
+ description:
+ - The name you assign to this crawler definition. It must be unique in your account.
+ required: true
+ type: str
+ recrawl_policy:
+ description:
+ - A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.
+ suboptions:
+ recrawl_behavior:
+ description:
+ - Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
+ - Supported options are C(CRAWL_EVERYTHING) and C(CRAWL_NEW_FOLDERS_ONLY).
+ type: str
+ type: dict
+ role:
+ description:
+ - The name or ARN of the IAM role associated with this crawler.
+ - Required when I(state=present).
+ type: str
+ schema_change_policy:
+ description:
+ - The policy for the crawler's update and deletion behavior.
+ suboptions:
+ delete_behavior:
+ description:
+ - Defines the deletion behavior when the crawler finds a deleted object.
+ - Supported options are C(LOG), C(DELETE_FROM_DATABASE), and C(DEPRECATE_IN_DATABASE).
+ type: str
+ update_behavior:
+ description:
+          - Defines the update behavior when the crawler finds a changed schema.
+ - Supported options are C(LOG) and C(UPDATE_IN_DATABASE).
+ type: str
+ type: dict
+ state:
+ description:
+ - Create or delete the AWS Glue crawler.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ table_prefix:
+ description:
+ - The table prefix used for catalog tables that are created.
+ type: str
+ targets:
+ description:
+      - The targets to crawl, specified as a dict of target lists. See example below.
+ - Required when I(state=present).
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue crawler
+- community.aws.glue_crawler:
+ name: my-glue-crawler
+ database_name: my_database
+ role: my-iam-role
+ schema_change_policy:
+ delete_behavior: DELETE_FROM_DATABASE
+ update_behavior: UPDATE_IN_DATABASE
+ recrawl_policy:
+      recrawl_behavior: CRAWL_EVERYTHING
+ targets:
+ S3Targets:
+ - Path: "s3://my-bucket/prefix/folder/"
+ ConnectionName: my-connection
+ Exclusions:
+ - "**.json"
+ - "**.yml"
+ state: present
+
+# Delete an AWS Glue crawler
+- community.aws.glue_crawler:
+ name: my-glue-crawler
+ state: absent
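+
+# Create an AWS Glue crawler for a DynamoDB table with a table prefix
+# (an illustrative sketch - the table, database, and role names are placeholders)
+- community.aws.glue_crawler:
+    name: my-dynamodb-crawler
+    database_name: my_database
+    role: my-iam-role
+    table_prefix: raw_
+    targets:
+      DynamoDBTargets:
+        - Path: my-dynamodb-table
+    state: present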
+'''
+
+RETURN = r'''
+creation_time:
+ description: The time and date that this crawler definition was created.
+ returned: when state is present
+ type: str
+ sample: '2021-04-01T05:19:58.326000+00:00'
+database_name:
+ description: The name of the database where results are written.
+ returned: when state is present
+ type: str
+    sample: my_database
+description:
+ description: Description of the crawler.
+ returned: when state is present
+ type: str
+ sample: My crawler
+last_updated:
+ description: The time and date that this crawler definition was last updated.
+ returned: when state is present
+ type: str
+ sample: '2021-04-01T05:19:58.326000+00:00'
+name:
+ description: The name of the AWS Glue crawler.
+ returned: always
+ type: str
+ sample: my-glue-crawler
+recrawl_policy:
+ description: A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.
+ returned: when state is present
+ type: complex
+ contains:
+ RecrawlBehavior:
+ description: Whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
+ returned: when state is present
+ type: str
+ sample: CRAWL_EVERYTHING
+role:
+ description: The name or ARN of the IAM role associated with this crawler.
+ returned: when state is present
+ type: str
+ sample: my-iam-role
+schema_change_policy:
+ description: The policy for the crawler's update and deletion behavior.
+ returned: when state is present
+ type: complex
+ contains:
+ DeleteBehavior:
+ description: The deletion behavior when the crawler finds a deleted object.
+ returned: when state is present
+ type: str
+ sample: DELETE_FROM_DATABASE
+ UpdateBehavior:
+ description: The update behavior when the crawler finds a changed schema.
+ returned: when state is present
+ type: str
+ sample: UPDATE_IN_DATABASE
+
+table_prefix:
+ description: The table prefix used for catalog tables that are created.
+ returned: when state is present
+ type: str
+ sample: my_prefix
+targets:
+ description: A list of targets to crawl.
+ returned: when state is present
+ type: complex
+ contains:
+ S3Targets:
+ description: List of S3 targets.
+ returned: when state is present
+ type: list
+ JdbcTargets:
+ description: List of JDBC targets.
+ returned: when state is present
+ type: list
+ MongoDBTargets:
+            description: List of MongoDB targets.
+ returned: when state is present
+ type: list
+ DynamoDBTargets:
+ description: List of DynamoDB targets.
+ returned: when state is present
+ type: list
+ CatalogTargets:
+ description: List of catalog targets.
+ returned: when state is present
+ type: list
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+
+
+def _get_glue_crawler(connection, module, glue_crawler_name):
+ '''
+ Get an AWS Glue crawler based on name. If not found, return None.
+ '''
+ try:
+ return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler']
+ except is_boto3_error_code('EntityNotFoundException'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def _trim_targets(targets):
+ return [_trim_target(t) for t in targets]
+
+
+def _trim_target(target):
+ """
+ Some target types have optional parameters which AWS will fill in and return
+ To compare the desired targets and the current targets we need to ignore the defaults
+ """
+ if not target:
+ return None
+ retval = target.copy()
+ if not retval.get('Exclusions', None):
+ retval.pop('Exclusions', None)
+ return retval
+
+
+def _compare_glue_crawler_params(user_params, current_params):
+ '''
+    Compare Glue crawler params. If there is a difference, return True immediately; otherwise return False.
+ '''
+ if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']:
+ return True
+ if 'Description' in user_params and user_params['Description'] != current_params['Description']:
+ return True
+ if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']:
+ return True
+ if 'Role' in user_params and user_params['Role'] != current_params['Role']:
+ return True
+ if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']:
+ return True
+ if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']:
+ return True
+ if 'Targets' in user_params:
+ if 'S3Targets' in user_params['Targets']:
+ if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']):
+ return True
+        if 'JdbcTargets' in user_params['Targets']:
+            if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']):
+                return True
+ if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']:
+ return True
+ if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']:
+ return True
+ if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']:
+ return True
+
+ return False
+
+
+def ensure_tags(connection, module, glue_crawler):
+ changed = False
+
+ if module.params.get('tags') is None:
+ return False
+
+ account_id, partition = get_aws_account_info(module)
+ arn = 'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name'))
+
+ try:
+ existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if module.check_mode:
+ existing_tags = {}
+ else:
+ module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name'))
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags'))
+
+ if tags_to_remove:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name'))
+
+ if tags_to_add:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name'))
+
+ return changed
+
+
+def create_or_update_glue_crawler(connection, module, glue_crawler):
+ '''
+ Create or update an AWS Glue crawler
+ '''
+
+ changed = False
+ params = dict()
+ params['Name'] = module.params.get('name')
+ params['Role'] = module.params.get('role')
+ params['Targets'] = module.params.get('targets')
+ if module.params.get('database_name') is not None:
+ params['DatabaseName'] = module.params.get('database_name')
+ if module.params.get('description') is not None:
+ params['Description'] = module.params.get('description')
+ if module.params.get('recrawl_policy') is not None:
+ params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True)
+ if module.params.get('role') is not None:
+ params['Role'] = module.params.get('role')
+ if module.params.get('schema_change_policy') is not None:
+ params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True)
+ if module.params.get('table_prefix') is not None:
+ params['TablePrefix'] = module.params.get('table_prefix')
+ if module.params.get('targets') is not None:
+ params['Targets'] = module.params.get('targets')
+
+ if glue_crawler:
+ if _compare_glue_crawler_params(params, glue_crawler):
+ try:
+ if not module.check_mode:
+ connection.update_crawler(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+ else:
+ try:
+ if not module.check_mode:
+ connection.create_crawler(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ glue_crawler = _get_glue_crawler(connection, module, params['Name'])
+
+ changed |= ensure_tags(connection, module, glue_crawler)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets']))
+
+
+def delete_glue_crawler(connection, module, glue_crawler):
+ '''
+ Delete an AWS Glue crawler
+ '''
+ changed = False
+
+ if glue_crawler:
+ try:
+ if not module.check_mode:
+ connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name'])
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ database_name=dict(type='str'),
+ description=dict(type='str'),
+ name=dict(required=True, type='str'),
+ purge_tags=dict(type='bool', default=True),
+ recrawl_policy=dict(type='dict', options=dict(
+ recrawl_behavior=dict(type='str')
+ )),
+ role=dict(type='str'),
+ schema_change_policy=dict(type='dict', options=dict(
+ delete_behavior=dict(type='str'),
+ update_behavior=dict(type='str')
+ )),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ table_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ targets=dict(type='dict')
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['role', 'targets'])
+ ],
+ supports_check_mode=True
+ )
+
+ connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ state = module.params.get('state')
+
+ glue_crawler = _get_glue_crawler(connection, module, module.params.get('name'))
+
+ if state == 'present':
+ create_or_update_glue_crawler(connection, module, glue_crawler)
+ else:
+ delete_glue_crawler(connection, module, glue_crawler)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/glue_job.py b/ansible_collections/community/aws/plugins/modules/glue_job.py
new file mode 100644
index 000000000..47d6156d7
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/glue_job.py
@@ -0,0 +1,484 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Rob White (@wimnat)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: glue_job
+version_added: 1.0.0
+short_description: Manage an AWS Glue job
+description:
+ - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_job).
+ The usage did not change.
+author:
+ - "Rob White (@wimnat)"
+ - "Vijayanand Sharma (@vijayanandsharma)"
+options:
+ allocated_capacity:
+ description:
+ - The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs
+ can be allocated; the default is 10. A DPU is a relative measure of processing power that consists
+ of 4 vCPUs of compute capacity and 16 GB of memory.
+ type: int
+ command_name:
+ description:
+      - The name of the job command. This must be C(glueetl).
+ default: glueetl
+ type: str
+ command_python_version:
+ description:
+ - Python version being used to execute a Python shell job.
+ - AWS currently supports C('2') or C('3').
+ type: str
+ version_added: 2.2.0
+ command_script_location:
+ description:
+ - The S3 path to a script that executes a job.
+ - Required when I(state=present).
+ type: str
+ connections:
+ description:
+ - A list of Glue connections used for this job.
+ type: list
+ elements: str
+ default_arguments:
+ description:
+ - A dict of default arguments for this job. You can specify arguments here that your own job-execution
+ script consumes, as well as arguments that AWS Glue itself consumes.
+ type: dict
+ description:
+ description:
+ - Description of the job being defined.
+ type: str
+ glue_version:
+ description:
+ - Glue version determines the versions of Apache Spark and Python that AWS Glue supports.
+ type: str
+ version_added: 1.5.0
+ max_concurrent_runs:
+ description:
+ - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when
+ this threshold is reached. The maximum value you can specify is controlled by a service limit.
+ type: int
+ max_retries:
+ description:
+ - The maximum number of times to retry this job if it fails.
+ type: int
+ name:
+ description:
+ - The name you assign to this job definition. It must be unique in your account.
+ required: true
+ type: str
+ number_of_workers:
+ description:
+ - The number of workers of a defined workerType that are allocated when a job runs.
+ type: int
+ version_added: 1.5.0
+ role:
+ description:
+ - The name or ARN of the IAM role associated with this job.
+ - Required when I(state=present).
+ type: str
+ state:
+ description:
+ - Create or delete the AWS Glue job.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ timeout:
+ description:
+ - The job timeout in minutes.
+ type: int
+ worker_type:
+ description:
+ - The type of predefined worker that is allocated when a job runs.
+ choices: [ 'Standard', 'G.1X', 'G.2X' ]
+ type: str
+ version_added: 1.5.0
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.2.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue job
+- community.aws.glue_job:
+ command_script_location: "s3://s3bucket/script.py"
+ default_arguments:
+ "--extra-py-files": s3://s3bucket/script-package.zip
+ "--TempDir": "s3://s3bucket/temp/"
+ name: my-glue-job
+ role: my-iam-role
+ state: present
+
+# Delete an AWS Glue job
+- community.aws.glue_job:
+ name: my-glue-job
+ state: absent
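+
+# Create an AWS Glue job with explicitly sized workers
+# (an illustrative sketch - the bucket, role, and sizing values are placeholders)
+- community.aws.glue_job:
+    command_script_location: "s3://s3bucket/script.py"
+    glue_version: "2.0"
+    worker_type: G.1X
+    number_of_workers: 10
+    name: my-spark-glue-job
+    role: my-iam-role
+    state: present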
+'''
+
+RETURN = r'''
+allocated_capacity:
+ description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
+ 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
+ that consists of 4 vCPUs of compute capacity and 16 GB of memory.
+ returned: when state is present
+ type: int
+ sample: 10
+command:
+ description: The JobCommand that executes this job.
+ returned: when state is present
+ type: complex
+ contains:
+ name:
+ description: The name of the job command.
+ returned: when state is present
+ type: str
+ sample: glueetl
+ script_location:
+ description: Specifies the S3 path to a script that executes a job.
+ returned: when state is present
+ type: str
+ sample: mybucket/myscript.py
+ python_version:
+ description: Specifies the Python version.
+ returned: when state is present
+ type: str
+ sample: 3
+connections:
+ description: The connections used for this job.
+ returned: when state is present
+ type: dict
+ sample: "{ Connections: [ 'list', 'of', 'connections' ] }"
+created_on:
+ description: The time and date that this job definition was created.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+default_arguments:
+ description: The default arguments for this job, specified as name-value pairs.
+ returned: when state is present
+ type: dict
+ sample: "{ 'mykey1': 'myvalue1' }"
+description:
+ description: Description of the job being defined.
+ returned: when state is present
+ type: str
+ sample: My first Glue job
+glue_version:
+ description: Glue version.
+ returned: when state is present
+ type: str
+ sample: 2.0
+job_name:
+ description: The name of the AWS Glue job.
+ returned: always
+ type: str
+ sample: my-glue-job
+execution_property:
+ description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
+ returned: always
+ type: complex
+ contains:
+ max_concurrent_runs:
+ description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
+ returned when this threshold is reached. The maximum value you can specify is controlled by
+ a service limit.
+ returned: when state is present
+ type: int
+ sample: 1
+last_modified_on:
+ description: The last point in time when this job definition was modified.
+ returned: when state is present
+ type: str
+ sample: "2018-04-21T05:19:58.326000+00:00"
+max_retries:
+ description: The maximum number of times to retry this job after a JobRun fails.
+ returned: when state is present
+ type: int
+ sample: 5
+name:
+ description: The name assigned to this job definition.
+ returned: when state is present
+ type: str
+ sample: my-glue-job
+role:
+ description: The name or ARN of the IAM role associated with this job.
+ returned: when state is present
+ type: str
+ sample: my-iam-role
+timeout:
+ description: The job timeout in minutes.
+ returned: when state is present
+ type: int
+ sample: 300
+'''
+
+# Non-ansible imports
+import copy
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+
+
+def _get_glue_job(connection, module, glue_job_name):
+ """
+ Get an AWS Glue job based on name. If not found, return None.
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job_name: Name of Glue job to get
+ :return: boto3 Glue job dict or None if not found
+ """
+ try:
+ return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job']
+ except is_boto3_error_code('EntityNotFoundException'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+def _compare_glue_job_params(user_params, current_params):
+ """
+    Compare Glue job params. If there is a difference, return True immediately; otherwise return False.
+
+ :param user_params: the Glue job parameters passed by the user
+ :param current_params: the Glue job parameters currently configured
+ :return: True if any parameter is mismatched else False
+ """
+ # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
+ # To counter this, add the key if it's missing with a blank value
+
+ if 'Description' not in current_params:
+ current_params['Description'] = ""
+ if 'DefaultArguments' not in current_params:
+ current_params['DefaultArguments'] = dict()
+
+ if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
+ return True
+ if 'Command' in user_params:
+ if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
+ return True
+ if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']:
+ return True
+ if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']:
+ return True
+ if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']:
+ return True
+ if 'Description' in user_params and user_params['Description'] != current_params['Description']:
+ return True
+    if 'ExecutionProperty' in user_params and \
+            user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
+ return True
+ if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']:
+ return True
+ if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
+ return True
+ if 'Role' in user_params and user_params['Role'] != current_params['Role']:
+ return True
+ if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
+ return True
+ if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']:
+ return True
+ if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']:
+ return True
+
+ return False
+
+
+def ensure_tags(connection, module, glue_job):
+ changed = False
+
+ if module.params.get('tags') is None:
+ return False
+
+ account_id, partition = get_aws_account_info(module)
+ arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name'))
+
+ try:
+ existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if module.check_mode:
+ existing_tags = {}
+ else:
+ module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name'))
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags'))
+
+ if tags_to_remove:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name'))
+
+ if tags_to_add:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name'))
+
+ return changed
+
+
+def create_or_update_glue_job(connection, module, glue_job):
+ """
+ Create or update an AWS Glue job
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job: a dict of AWS Glue job parameters or None
+ :return:
+ """
+
+ changed = False
+ params = dict()
+ params['Name'] = module.params.get("name")
+ params['Role'] = module.params.get("role")
+ if module.params.get("allocated_capacity") is not None:
+ params['AllocatedCapacity'] = module.params.get("allocated_capacity")
+ if module.params.get("command_script_location") is not None:
+ params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
+ if module.params.get("command_python_version") is not None:
+ params['Command']['PythonVersion'] = module.params.get("command_python_version")
+ if module.params.get("connections") is not None:
+ params['Connections'] = {'Connections': module.params.get("connections")}
+ if module.params.get("default_arguments") is not None:
+ params['DefaultArguments'] = module.params.get("default_arguments")
+ if module.params.get("description") is not None:
+ params['Description'] = module.params.get("description")
+ if module.params.get("glue_version") is not None:
+ params['GlueVersion'] = module.params.get("glue_version")
+ if module.params.get("max_concurrent_runs") is not None:
+ params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
+ if module.params.get("max_retries") is not None:
+ params['MaxRetries'] = module.params.get("max_retries")
+ if module.params.get("timeout") is not None:
+ params['Timeout'] = module.params.get("timeout")
+ if module.params.get("worker_type") is not None:
+ params['WorkerType'] = module.params.get("worker_type")
+ if module.params.get("number_of_workers") is not None:
+ params['NumberOfWorkers'] = module.params.get("number_of_workers")
+
+ # If glue_job is not None then check if it needs to be modified, else create it
+ if glue_job:
+ if _compare_glue_job_params(params, glue_job):
+ try:
+ # Update job needs slightly modified params
+ update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
+ del update_params['JobUpdate']['Name']
+ if not module.check_mode:
+ connection.update_job(aws_retry=True, **update_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+ else:
+ try:
+ if not module.check_mode:
+ connection.create_job(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ glue_job = _get_glue_job(connection, module, params['Name'])
+
+ changed |= ensure_tags(connection, module, glue_job)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments']))
+
+
+def delete_glue_job(connection, module, glue_job):
+ """
+ Delete an AWS Glue job
+
+ :param connection: AWS boto3 glue connection
+ :param module: Ansible module
+ :param glue_job: a dict of AWS Glue job parameters or None
+ :return:
+ """
+ changed = False
+
+ if glue_job:
+ try:
+ if not module.check_mode:
+ connection.delete_job(aws_retry=True, JobName=glue_job['Name'])
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = (
+ dict(
+ allocated_capacity=dict(type='int'),
+ command_name=dict(type='str', default='glueetl'),
+ command_python_version=dict(type='str'),
+ command_script_location=dict(type='str'),
+ connections=dict(type='list', elements='str'),
+ default_arguments=dict(type='dict'),
+ description=dict(type='str'),
+ glue_version=dict(type='str'),
+ max_concurrent_runs=dict(type='int'),
+ max_retries=dict(type='int'),
+ name=dict(required=True, type='str'),
+ number_of_workers=dict(type='int'),
+ purge_tags=dict(type='bool', default=True),
+ role=dict(type='str'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ timeout=dict(type='int'),
+ worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'),
+ )
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['role', 'command_script_location'])
+ ],
+ supports_check_mode=True
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ connection = module.client('glue', retry_decorator=retry_decorator)
+
+ state = module.params.get("state")
+
+ glue_job = _get_glue_job(connection, module, module.params.get("name"))
+
+ if state == 'present':
+ create_or_update_glue_job(connection, module, glue_job)
+ else:
+ delete_glue_job(connection, module, glue_job)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key.py b/ansible_collections/community/aws/plugins/modules/iam_access_key.py
new file mode 100644
index 000000000..ad61b5b2a
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_access_key.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_access_key
+version_added: 2.1.0
+short_description: Manage AWS IAM User access keys
+description:
+ - Manage AWS IAM user access keys.
+author: Mark Chappell (@tremble)
+options:
+ user_name:
+ description:
+ - The name of the IAM User to which the key belongs.
+ required: true
+ type: str
+ aliases: ['username']
+ id:
+ description:
+ - The ID of the access key.
+ - Required when I(state=absent).
+ - Mutually exclusive with I(rotate_keys).
+ required: false
+ type: str
+ state:
+ description:
+ - Create or remove the access key.
+ - When I(state=present) and I(id) is not defined a new key will be created.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ active:
+ description:
+ - Whether the key should be enabled or disabled.
+ - Defaults to C(true) when creating a new key.
+ required: false
+ type: bool
+ aliases: ['enabled']
+ rotate_keys:
+ description:
+ - When there are already 2 access keys attached to the IAM user the oldest
+ key will be removed and a new key created.
+      - Ignored if I(state=absent).
+ - Mutually exclusive with I(id).
+ required: false
+ type: bool
+ default: false
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a new access key
+ community.aws.iam_access_key:
+ user_name: example_user
+ state: present
+
+- name: Delete the access_key
+ community.aws.iam_access_key:
+ user_name: example_user
+ id: AKIA1EXAMPLE1EXAMPLE
+ state: absent
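+
+# When the user already has two access keys, rotate_keys deletes the oldest
+# key before creating the new one (an illustrative sketch)
+- name: Rotate the access keys
+  community.aws.iam_access_key:
+    user_name: example_user
+    rotate_keys: true
+    state: present
+
+# Disable an existing access key without deleting it
+# (the key ID below is a placeholder)
+- name: Disable the access_key
+  community.aws.iam_access_key:
+    user_name: example_user
+    id: AKIA1EXAMPLE1EXAMPLE
+    active: false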
+'''
+
+RETURN = r'''
+access_key:
+ description: A dictionary containing all the access key information.
+ returned: When the key exists.
+ type: complex
+ contains:
+ access_key_id:
+ description: The ID for the access key.
+ returned: success
+ type: str
+ sample: AKIA1EXAMPLE1EXAMPLE
+ create_date:
+ description: The date and time, in ISO 8601 date-time format, when the access key was created.
+ returned: success
+ type: str
+ sample: "2021-10-09T13:25:42+00:00"
+ user_name:
+ description: The name of the IAM user to which the key is attached.
+ returned: success
+ type: str
+ sample: example_user
+ status:
+ description:
+ - The status of the key.
+ - C(Active) means it can be used.
+ - C(Inactive) means it can not be used.
+ returned: success
+ type: str
+ sample: Inactive
+secret_access_key:
+ description:
+ - The secret access key.
+      - A secret access key is the equivalent of a password which cannot be changed and, as such, should be considered sensitive data.
+ - Secret access keys can only be accessed at creation time.
+ returned: When a new key is created.
+ type: str
+ sample: example/Example+EXAMPLE+example/Example
+deleted_access_key_id:
+ description:
+ - The access key deleted during rotation.
+ returned: When a key was deleted during the rotation of access keys
+ type: str
+ sample: AKIA1EXAMPLE1EXAMPLE
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def delete_access_key(access_keys, user, access_key_id):
+ if not access_key_id:
+ return False
+
+ if access_key_id not in access_keys:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.delete_access_key(
+ aws_retry=True,
+ UserName=user,
+ AccessKeyId=access_key_id,
+ )
+ except is_boto3_error_code('NoSuchEntityException'):
+ # Generally occurs when race conditions have happened and someone
+ # deleted the key while we were checking to see if it existed.
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)
+ )
+
+ return True
+
+
+def update_access_key(access_keys, user, access_key_id, enabled):
+ if access_key_id not in access_keys:
+ module.fail_json(
+ msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user),
+ )
+
+ changes = dict()
+ access_key = access_keys.get(access_key_id)
+
+ if enabled is not None:
+ desired_status = 'Active' if enabled else 'Inactive'
+ if access_key.get('status') != desired_status:
+ changes['Status'] = desired_status
+
+ if not changes:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.update_access_key(
+ aws_retry=True,
+ UserName=user,
+ AccessKeyId=access_key_id,
+ **changes
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, changes=changes,
+ msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user),
+ )
+ return True
+
+
+def create_access_key(access_keys, user, rotate_keys, enabled):
+ changed = False
+ oldest_key = False
+
+ if len(access_keys) > 1 and rotate_keys:
+ sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None))
+ oldest_key = sorted_keys[0]
+ changed |= delete_access_key(access_keys, user, oldest_key)
+
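+    # In check mode, report the key that would be rotated away (if any),
+    # otherwise just report that a new key would be created.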
+ if module.check_mode:
+ if changed:
+ return dict(deleted_access_key=oldest_key)
+ return True
+
+ try:
+ results = client.create_access_key(aws_retry=True, UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user))
+ results = camel_dict_to_snake_dict(results)
+ access_key = results.get('access_key')
+ access_key = normalize_boto3_result(access_key)
+
+ # Update settings which can't be managed on creation
+ if enabled is False:
+ access_key_id = access_key['access_key_id']
+ access_keys = {access_key_id: access_key}
+ update_access_key(access_keys, user, access_key_id, enabled)
+ access_key['status'] = 'Inactive'
+
+ if oldest_key:
+ access_key['deleted_access_key'] = oldest_key
+
+ return access_key
+
+
+def get_access_keys(user):
+ try:
+ results = client.list_access_keys(aws_retry=True, UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg='Failed to get access keys for user "{0}"'.format(user)
+ )
+ if not results:
+ return None
+
+ results = camel_dict_to_snake_dict(results)
+ access_keys = results.get('access_key_metadata', [])
+ if not access_keys:
+ return []
+
+ access_keys = normalize_boto3_result(access_keys)
+ access_keys = {k['access_key_id']: k for k in access_keys}
+ return access_keys
+
+
+def main():
+
+ global module
+ global client
+
+ argument_spec = dict(
+ user_name=dict(required=True, type='str', aliases=['username']),
+ id=dict(required=False, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ active=dict(required=False, type='bool', aliases=['enabled']),
+ rotate_keys=dict(required=False, type='bool', default=False),
+ )
+
+    required_if = [
+        ['state', 'absent', ['id']],
+    ]
+    mutually_exclusive = [
+        ['rotate_keys', 'id'],
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True,
+    )
+
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ changed = False
+ state = module.params.get('state')
+ user = module.params.get('user_name')
+ access_key_id = module.params.get('id')
+ rotate_keys = module.params.get('rotate_keys')
+ enabled = module.params.get('active')
+
+ access_keys = get_access_keys(user)
+ results = dict()
+
+ if state == 'absent':
+ changed |= delete_access_key(access_keys, user, access_key_id)
+ else:
+ # If we have an ID then we should try to update it
+ if access_key_id:
+ changed |= update_access_key(access_keys, user, access_key_id, enabled)
+ access_keys = get_access_keys(user)
+ results['access_key'] = access_keys.get(access_key_id, None)
+ # Otherwise we try to create a new one
+ else:
+ secret_key = create_access_key(access_keys, user, rotate_keys, enabled)
+ if isinstance(secret_key, bool):
+ changed |= secret_key
+ else:
+ changed = True
+ results['access_key_id'] = secret_key.get('access_key_id', None)
+ results['secret_access_key'] = secret_key.pop('secret_access_key', None)
+ results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None)
+ if secret_key:
+ results['access_key'] = secret_key
+ results = scrub_none_parameters(results)
+
+ module.exit_json(changed=changed, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py b/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py
new file mode 100644
index 000000000..91429eff9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_access_key_info
+version_added: 2.1.0
+short_description: fetch information about AWS IAM User access keys
+description:
+  - 'Fetches information about AWS IAM user access keys.'
+ - 'Note: It is not possible to fetch the secret access key.'
+author: Mark Chappell (@tremble)
+options:
+ user_name:
+ description:
+ - The name of the IAM User to which the keys belong.
+ required: true
+ type: str
+ aliases: ['username']
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Fetch access keys for a user
+ community.aws.iam_access_key_info:
+ user_name: example_user
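+
+# A sketch pairing the module with a debug task; it assumes the access_keys
+# list documented under RETURN, which is sorted by creation date.
+- name: Fetch access keys and show the oldest key ID
+  community.aws.iam_access_key_info:
+    user_name: example_user
+  register: user_keys
+
+- name: Show the oldest access key ID
+  ansible.builtin.debug:
+    msg: "{{ user_keys.access_keys[0].access_key_id }}"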
+'''
+
+RETURN = r'''
+access_keys:
+  description: A list of dictionaries containing metadata about the access keys.
+  returned: always
+ type: list
+ elements: dict
+ contains:
+ access_key_id:
+ description: The ID for the access key.
+ returned: success
+ type: str
+ sample: AKIA1EXAMPLE1EXAMPLE
+ create_date:
+ description: The date and time, in ISO 8601 date-time format, when the access key was created.
+ returned: success
+ type: str
+ sample: "2021-10-09T13:25:42+00:00"
+ user_name:
+ description: The name of the IAM user to which the key is attached.
+ returned: success
+ type: str
+ sample: example_user
+ status:
+ description:
+ - The status of the key.
+ - C(Active) means it can be used.
+ - C(Inactive) means it can not be used.
+ returned: success
+ type: str
+ sample: Inactive
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def get_access_keys(user):
+ try:
+ results = client.list_access_keys(aws_retry=True, UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg='Failed to get access keys for user "{0}"'.format(user)
+ )
+ if not results:
+ return None
+
+ results = camel_dict_to_snake_dict(results)
+ access_keys = results.get('access_key_metadata', [])
+ if not access_keys:
+ return []
+
+ access_keys = normalize_boto3_result(access_keys)
+ access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None))
+ return access_keys
+
+
+def main():
+
+ global module
+ global client
+
+ argument_spec = dict(
+ user_name=dict(required=True, type='str', aliases=['username']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ changed = False
+ user = module.params.get('user_name')
+ access_keys = get_access_keys(user)
+
+ module.exit_json(changed=changed, access_keys=access_keys)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_group.py b/ansible_collections/community/aws/plugins/modules/iam_group.py
new file mode 100644
index 000000000..31987ef1d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_group.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_group
+version_added: 1.0.0
+short_description: Manage AWS IAM groups
+description:
+ - Manage AWS IAM groups.
+author:
+- Nick Aslanidis (@naslanidis)
+- Maksym Postument (@infectsoldier)
+options:
+ name:
+ description:
+ - The name of the group to create.
+ required: true
+ type: str
+ managed_policies:
+ description:
+      - A list of managed policy ARNs or friendly names to attach to the group.
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ required: false
+ type: list
+ elements: str
+ default: []
+ aliases: ['managed_policy']
+ users:
+ description:
+ - A list of existing users to add as members of the group.
+ required: false
+ type: list
+ elements: str
+ default: []
+ state:
+ description:
+ - Create or remove the IAM group.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ purge_policies:
+ description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ required: false
+ default: false
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ purge_users:
+ description:
+      - When I(purge_users=true) users which are not included in I(users) will be removed from the group.
+ required: false
+ default: false
+ type: bool
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a group
+ community.aws.iam_group:
+ name: testgroup1
+ state: present
+
+- name: Create a group and attach a managed policy using its ARN
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ state: present
+
+- name: Create a group with users as members and attach a managed policy using its ARN
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ users:
+ - test_user1
+ - test_user2
+ state: present
+
+- name: Remove all managed policies from an existing group with an empty list
+ community.aws.iam_group:
+ name: testgroup1
+ state: present
+ purge_policies: true
+
+- name: Remove all group members from an existing group
+ community.aws.iam_group:
+ name: testgroup1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ purge_users: true
+ state: present
+
+- name: Delete the group
+ community.aws.iam_group:
+ name: testgroup1
+ state: absent
+
+'''
+RETURN = r'''
+iam_group:
+ description: dictionary containing all the group information including group membership
+ returned: success
+ type: complex
+ contains:
+ group:
+ description: dictionary containing all the group information
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the group
+ type: str
+ sample: "arn:aws:iam::1234567890:group/testgroup1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the group was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ group_id:
+ description: the stable and unique string identifying the group
+ type: str
+ sample: AGPA12345EXAMPLE54321
+ group_name:
+ description: the friendly name that identifies the group
+ type: str
+ sample: testgroup1
+ path:
+ description: the path to the group
+ type: str
+ sample: /
+ users:
+ description: list containing all the group members
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the user
+ type: str
+ sample: "arn:aws:iam::1234567890:user/test_user1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the user was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ user_id:
+ description: the stable and unique string identifying the user
+ type: str
+ sample: AIDA12345EXAMPLE54321
+ user_name:
+ description: the friendly name that identifies the user
+ type: str
+          sample: test_user1
+ path:
+ description: the path to the user
+ type: str
+ sample: /
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def compare_attached_group_policies(current_attached_policies, new_attached_policies):
+
+ # If new_attached_policies is None it means we want to remove all policies
+ if len(current_attached_policies) > 0 and new_attached_policies is None:
+ return False
+
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+    return set(current_attached_policies_arn_list) == set(new_attached_policies)
+
+
+def compare_group_members(current_group_members, new_group_members):
+
+    # If new_group_members is None it means we want to remove all users
+ if len(current_group_members) > 0 and new_group_members is None:
+ return False
+    return set(current_group_members) == set(new_group_members)
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
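+    # Nothing to do if every requested policy is already an ARN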
+    if all(policy.startswith('arn:') for policy in policy_names if policy is not None):
+ return policy_names
+ allpolicies = {}
+ paginator = connection.get_paginator('list_policies')
+ policies = paginator.paginate().build_full_result()['Policies']
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json(msg="Couldn't find policy: " + str(e))
+
+
+def create_or_update_group(connection, module):
+
+ params = dict()
+ params['GroupName'] = module.params.get('name')
+ managed_policies = module.params.get('managed_policies')
+ users = module.params.get('users')
+ purge_users = module.params.get('purge_users')
+ purge_policies = module.params.get('purge_policies')
+ changed = False
+ if managed_policies:
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ # Get group
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get group")
+
+ # If group is None, create it
+ if group is None:
+ # Check mode means we would create the group
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ group = connection.create_group(**params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create group")
+
+ # Manage managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['GroupName'])
+ if not compare_attached_group_policies(current_attached_policies, managed_policies):
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+ # If managed_policies has a single empty element we want to remove all attached policies
+ if purge_policies:
+ # Detach policies not present
+ for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName'])
+ # If there are policies to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(managed_policies) - set(current_attached_policies_arn_list):
+ changed = True
+ # If there are policies in managed_policies attach each policy
+ if managed_policies != [None] and not module.check_mode:
+ for policy_arn in managed_policies:
+ try:
+ connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName'])
+
+ # Manage group memberships
+ try:
+ current_group_members = get_group(connection, module, params['GroupName'])['Users']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+
+ current_group_members_list = []
+ for member in current_group_members:
+ current_group_members_list.append(member['UserName'])
+
+ if not compare_group_members(current_group_members_list, users):
+
+ if purge_users:
+ for user in list(set(current_group_members_list) - set(users)):
+ # Ensure we mark things have changed if any user gets purged
+ changed = True
+ # Skip actions for check mode
+ if not module.check_mode:
+ try:
+ connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName']))
+ # If there are users to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(users) - set(current_group_members_list):
+ changed = True
+ # Skip actions for check mode
+ if users != [None] and not module.check_mode:
+ for user in users:
+ try:
+ connection.add_user_to_group(GroupName=params['GroupName'], UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName']))
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ # Get the group again
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+
+ module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group))
+
+
+def destroy_group(connection, module):
+
+ params = dict()
+ params['GroupName'] = module.params.get('name')
+
+ try:
+ group = get_group(connection, module, params['GroupName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+ if group:
+ # Check mode means we would remove this group
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Remove any attached policies otherwise deletion fails
+ try:
+ for policy in get_attached_policy_list(connection, module, params['GroupName']):
+ connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName'])
+
+ # Remove any users in the group otherwise deletion fails
+ current_group_members_list = []
+ try:
+ current_group_members = get_group(connection, module, params['GroupName'])['Users']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
+ for member in current_group_members:
+ current_group_members_list.append(member['UserName'])
+ for user in current_group_members_list:
+ try:
+ connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName']))
+
+ try:
+ connection.delete_group(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName'])
+
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=True)
+
+
+@AWSRetry.exponential_backoff()
+def get_group(connection, module, name):
+ try:
+ paginator = connection.get_paginator('get_group')
+ return paginator.paginate(GroupName=name).build_full_result()
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+
+
+@AWSRetry.exponential_backoff()
+def get_attached_policy_list(connection, module, name):
+
+ try:
+ paginator = connection.get_paginator('list_attached_group_policies')
+ return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies']
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
+ users=dict(default=[], type='list', elements='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ purge_users=dict(default=False, type='bool'),
+ purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('iam')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_group(connection, module)
+ else:
+ destroy_group(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py b/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
new file mode 100644
index 000000000..f86f019d5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_managed_policy
+version_added: 1.0.0
+short_description: Manage User Managed IAM policies
+description:
+ - Allows creating and removing managed IAM policies
+options:
+ policy_name:
+ description:
+ - The name of the managed policy.
+ required: True
+ type: str
+ policy_description:
+ description:
+      - A helpful description of this policy. This value is immutable and can only be set when creating a new policy.
+ default: ''
+ type: str
+ policy:
+ description:
+      - A properly JSON-formatted policy document.
+ type: json
+ make_default:
+ description:
+ - Make this revision the default revision.
+ default: True
+ type: bool
+ only_version:
+ description:
+      - Remove all other non-default revisions. If used with C(make_default) this will result in all other versions of this policy being deleted.
+ type: bool
+ default: false
+ state:
+ description:
+      - Should this managed policy be present or absent. Set to C(absent) to detach all entities from this policy and remove it if found.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+
+author: "Dan Kozlowski (@dkhenry)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Create a policy
+- name: Create IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy_description: "A Helpful managed policy"
+ policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
+ state: present
+
+# Update a policy with a new default version
+- name: Update an IAM Managed Policy with new default version
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy: "{{ lookup('file', 'managed_policy_update.json') }}"
+ state: present
+
+# Update a policy with a new non default version
+- name: Update an IAM Managed Policy with a non default version
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: "Allow"
+ Action: "logs:CreateLogGroup"
+ Resource: "*"
+ make_default: false
+ state: present
+
+# Update a policy and make it the only version and the default version
+- name: Update an IAM Managed Policy with default version as the only version
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ policy: |
+ {
+ "Version": "2012-10-17",
+ "Statement":[{
+ "Effect": "Allow",
+ "Action": "logs:PutRetentionPolicy",
+ "Resource": "*"
+ }]
+ }
+ only_version: true
+ state: present
+
+# Remove a policy
+- name: Remove an existing IAM Managed Policy
+ community.aws.iam_managed_policy:
+ policy_name: "ManagedPolicy"
+ state: absent
+'''
+
+RETURN = r'''
+policy:
+  description: Returns the policy JSON structure; when I(state=absent) this will return the value of the removed policy.
+ returned: success
+ type: complex
+ contains: {}
+ sample: '{
+    "arn": "arn:aws:iam::aws:policy/AdministratorAccess",
+ "attachment_count": 0,
+ "create_date": "2017-03-01T15:42:55.981000+00:00",
+ "default_version_id": "v1",
+ "is_attachable": true,
+ "path": "/",
+ "policy_id": "ANPA1245EXAMPLE54321",
+ "policy_name": "AdministratorAccess",
+ "update_date": "2017-03-01T15:42:55.981000+00:00"
+ }'
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def list_policies_with_backoff():
+ paginator = client.get_paginator('list_policies')
+ return paginator.paginate(Scope='Local').build_full_result()
+
+
+def get_policy_by_name(name):
+ try:
+ response = list_policies_with_backoff()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list policies")
+ for policy in response['Policies']:
+ if policy['PolicyName'] == name:
+ return policy
+ return None
+
+
+def delete_oldest_non_default_version(policy):
+ try:
+ versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
+ if not v['IsDefaultVersion']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list policy versions")
+ versions.sort(key=lambda v: v['CreateDate'], reverse=True)
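+    # After the reverse sort, versions[-1:] is the single oldest non-default version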
+ for v in versions[-1:]:
+ try:
+ client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete policy version")
+
+
+# This needs to return policy_version, changed
+def get_or_create_policy_version(policy, policy_document):
+ try:
+ versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list policy versions")
+
+ for v in versions:
+ try:
+ document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId']))
+
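+        # compare_policies() returns True when the two documents differ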
+ if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))):
+ return v, True
+
+ # If the current policy matches the existing one
+ if not compare_policies(document, json.loads(to_native(policy_document))):
+ return v, False
+
+ # No existing version so create one
+ # There is a service limit (typically 5) of policy versions.
+ #
+ # Rather than assume that it is 5, we'll try to create the policy
+ # and if that doesn't work, delete the oldest non default policy version
+ # and try again.
+ try:
+ version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
+ return version, True
+ except is_boto3_error_code('LimitExceeded'):
+ delete_oldest_non_default_version(policy)
+ try:
+ version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
+ return version, True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e:
+ module.fail_json_aws(second_e, msg="Couldn't create policy version")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create policy version")
+
+
+def set_if_default(policy, policy_version, is_default):
+ if is_default and not policy_version['IsDefaultVersion']:
+ try:
+ client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't set default policy version")
+ return True
+ return False
+
+
+def set_if_only(policy, policy_version, is_only):
+ if is_only:
+ try:
+ versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[
+ 'Versions'] if not v['IsDefaultVersion']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list policy versions")
+ for v in versions:
+ try:
+ client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete policy version")
+ return len(versions) > 0
+ return False
+
+
+def detach_all_entities(policy, **kwargs):
+ try:
+ entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't list entities for policy {0}".format(policy['PolicyName']))
+
+ for g in entities['PolicyGroups']:
+ try:
+ client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName']))
+ for u in entities['PolicyUsers']:
+ try:
+ client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName']))
+ for r in entities['PolicyRoles']:
+ try:
+ client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName']))
+ if entities['IsTruncated']:
+        detach_all_entities(policy, Marker=entities['Marker'])
+
+
+def create_or_update_policy(existing_policy):
+ name = module.params.get('policy_name')
+ description = module.params.get('policy_description')
+ default = module.params.get('make_default')
+ only = module.params.get('only_version')
+
+ policy = None
+
+ if module.params.get('policy') is not None:
+ policy = json.dumps(json.loads(module.params.get('policy')))
+
+ if existing_policy is None:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Create policy when none already exists
+ try:
+ rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name))
+
+ module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
+ else:
+ policy_version, changed = get_or_create_policy_version(existing_policy, policy)
+ changed = set_if_default(existing_policy, policy_version, default) or changed
+ changed = set_if_only(existing_policy, policy_version, only) or changed
+
+ # If anything has changed we need to refresh the policy
+ if changed:
+ try:
+ updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Couldn't get policy")
+
+ module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy))
+ else:
+ module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy))
+
+
+def delete_policy(existing_policy):
+ # Check for existing policy
+ if existing_policy:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Detach policy
+ detach_all_entities(existing_policy)
+ # Delete Versions
+ try:
+ versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list policy versions")
+ for v in versions:
+ if not v['IsDefaultVersion']:
+ try:
+ client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't delete policy version {0}".format(v['VersionId']))
+ # Delete policy
+ try:
+ client.delete_policy(PolicyArn=existing_policy['Arn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName']))
+
+ # This is the one case where we will return the old policy
+ module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy))
+ else:
+ module.exit_json(changed=False, policy=None)
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ policy_name=dict(required=True),
+ policy_description=dict(default=''),
+ policy=dict(type='json'),
+ make_default=dict(type='bool', default=True),
+ only_version=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['policy']]],
+ supports_check_mode=True
+ )
+
+ name = module.params.get('policy_name')
+ state = module.params.get('state')
+
+ try:
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ existing_policy = get_policy_by_name(name)
+
+ if state == 'present':
+ create_or_update_policy(existing_policy)
+ else:
+ delete_policy(existing_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py b/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
new file mode 100644
index 000000000..16abae170
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_mfa_device_info
+version_added: 1.0.0
+short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
+description:
+  - List the MFA (Multi-Factor Authentication) devices registered for a user.
+author: Victor Costan (@pwnall)
+options:
+ user_name:
+ description:
+      - The name of the user whose MFA devices will be listed.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+RETURN = """
+mfa_devices:
+ description: The MFA devices registered for the given user
+ returned: always
+ type: list
+ sample:
+ - enable_date: "2016-03-11T23:25:36+00:00"
+ serial_number: arn:aws:iam::123456789012:mfa/example
+ user_name: example
+ - enable_date: "2016-03-11T23:25:37+00:00"
+ serial_number: arn:aws:iam::123456789012:mfa/example
+ user_name: example
+"""
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
+- name: List MFA devices
+ community.aws.iam_mfa_device_info:
+ register: mfa_devices
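+
+# A sketch using the user_name option documented above; "example" is a
+# placeholder user name.
+- name: List MFA devices for a specific user
+  community.aws.iam_mfa_device_info:
+    user_name: example
+  register: user_mfa_devices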
+
+# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+- name: Assume an existing role
+ community.aws.sts_assume_role:
+ mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def list_mfa_devices(connection, module):
+ user_name = module.params.get('user_name')
+ changed = False
+
+ args = {}
+ if user_name is not None:
+ args['UserName'] = user_name
+    try:
+        response = connection.list_mfa_devices(**args)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to list MFA devices")
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
+def main():
+ argument_spec = dict(
+ user_name=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_mfa_devices(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_password_policy.py b/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
new file mode 100644
index 000000000..19614d26d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_password_policy
+version_added: 1.0.0
+short_description: Update an IAM Password Policy
+description:
+  - Updates the IAM password policy for a given AWS account.
+author:
+ - "Aaron Smith (@slapula)"
+options:
+ state:
+ description:
+ - Specifies the overall state of the password policy.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+ min_pw_length:
+ description:
+ - Minimum password length.
+ default: 6
+ aliases: [minimum_password_length]
+ type: int
+ require_symbols:
+ description:
+ - Require symbols in password.
+ default: false
+ type: bool
+ require_numbers:
+ description:
+ - Require numbers in password.
+ default: false
+ type: bool
+ require_uppercase:
+ description:
+ - Require uppercase letters in password.
+ default: false
+ type: bool
+ require_lowercase:
+ description:
+ - Require lowercase letters in password.
+ default: false
+ type: bool
+ allow_pw_change:
+ description:
+ - Allow users to change their password.
+ default: false
+ type: bool
+ aliases: [allow_password_change]
+ pw_max_age:
+ description:
+      - Maximum age for a password in days. When this option is C(0),
+        passwords do not expire automatically.
+ default: 0
+ aliases: [password_max_age]
+ type: int
+ pw_reuse_prevent:
+ description:
+      - The number of previous passwords that users are prevented from re-using. When C(0), re-use is not restricted.
+ default: 0
+ aliases: [password_reuse_prevent, prevent_reuse]
+ type: int
+ pw_expire:
+ description:
+      - Prevents users from changing an expired password.
+ default: false
+ type: bool
+ aliases: [password_expire, expire]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Password policy for AWS account
+ community.aws.iam_password_policy:
+ state: present
+ min_pw_length: 8
+ require_symbols: false
+ require_numbers: true
+ require_uppercase: true
+ require_lowercase: true
+ allow_pw_change: true
+ pw_max_age: 60
+ pw_reuse_prevent: 5
+ pw_expire: false
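+
+# A minimal sketch based on the state option documented above; state=absent
+# deletes the custom account password policy.
+- name: Delete the account password policy
+  community.aws.iam_password_policy:
+    state: absent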
+'''
+
+RETURN = ''' # '''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+class IAMConnection(object):
+ def __init__(self, module):
+ try:
+ self.connection = module.resource('iam')
+ self.module = module
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
+
+ def policy_to_dict(self, policy):
+ policy_attributes = [
+ 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry',
+ 'max_password_age', 'minimum_password_length', 'password_reuse_prevention',
+ 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters'
+ ]
+ ret = {}
+ for attr in policy_attributes:
+ ret[attr] = getattr(policy, attr)
+ return ret
+
+ def update_password_policy(self, module, policy):
+ min_pw_length = module.params.get('min_pw_length')
+ require_symbols = module.params.get('require_symbols')
+ require_numbers = module.params.get('require_numbers')
+ require_uppercase = module.params.get('require_uppercase')
+ require_lowercase = module.params.get('require_lowercase')
+ allow_pw_change = module.params.get('allow_pw_change')
+ pw_max_age = module.params.get('pw_max_age')
+ pw_reuse_prevent = module.params.get('pw_reuse_prevent')
+ pw_expire = module.params.get('pw_expire')
+
+ update_parameters = dict(
+ MinimumPasswordLength=min_pw_length,
+ RequireSymbols=require_symbols,
+ RequireNumbers=require_numbers,
+ RequireUppercaseCharacters=require_uppercase,
+ RequireLowercaseCharacters=require_lowercase,
+ AllowUsersToChangePassword=allow_pw_change,
+ HardExpiry=pw_expire
+ )
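+        # PasswordReusePrevention and MaxPasswordAge may not be passed as 0;
+        # omitting them disables the corresponding check.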
+ if pw_reuse_prevent:
+ update_parameters.update(PasswordReusePrevention=pw_reuse_prevent)
+ if pw_max_age:
+ update_parameters.update(MaxPasswordAge=pw_max_age)
+
+ try:
+ original_policy = self.policy_to_dict(policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ original_policy = {}
+
+ try:
+ results = policy.update(**update_parameters)
+ policy.reload()
+ updated_policy = self.policy_to_dict(policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy")
+
+ changed = (original_policy != updated_policy)
+ return (changed, updated_policy, camel_dict_to_snake_dict(results))
+
+ def delete_password_policy(self, policy):
+ try:
+ results = policy.delete()
+ except is_boto3_error_code('NoSuchEntity'):
+ self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
+ return camel_dict_to_snake_dict(results)
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'state': dict(choices=['present', 'absent'], required=True),
+ 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6),
+ 'require_symbols': dict(type='bool', default=False),
+ 'require_numbers': dict(type='bool', default=False),
+ 'require_uppercase': dict(type='bool', default=False),
+ 'require_lowercase': dict(type='bool', default=False),
+ 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False),
+ 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0),
+ 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0),
+ 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False),
+ },
+ supports_check_mode=True,
+ )
+
+ resource = IAMConnection(module)
+ policy = resource.connection.AccountPasswordPolicy()
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ (changed, new_policy, update_result) = resource.update_password_policy(module, policy)
+ module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy)
+
+ if state == 'absent':
+ delete_result = resource.delete_password_policy(policy)
+ module.exit_json(changed=True, task_status={'IAM': delete_result})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_role.py b/ansible_collections/community/aws/plugins/modules/iam_role.py
new file mode 100644
index 000000000..4add6a525
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_role.py
@@ -0,0 +1,736 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_role
+version_added: 1.0.0
+short_description: Manage AWS IAM roles
+description:
+ - Manage AWS IAM roles.
+author:
+ - "Rob White (@wimnat)"
+options:
+ path:
+ description:
+ - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+ default: "/"
+ type: str
+ name:
+ description:
+ - The name of the role to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Provides a description of the role.
+ type: str
+ boundary:
+ description:
+ - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+ - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
+ - This is intended for roles/users that have permissions to create new IAM objects.
+ - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+ aliases: [boundary_policy_arn]
+ type: str
+ assume_role_policy_document:
+ description:
+ - The trust relationship policy document that grants an entity permission to assume the role.
+ - This parameter is required when I(state=present).
+ type: json
+ managed_policies:
+ description:
+      - A list of managed policy ARNs or friendly names.
+      - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ aliases: ['managed_policy']
+ type: list
+ elements: str
+ max_session_duration:
+ description:
+ - The maximum duration (in seconds) of a session when assuming the role.
+ - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+ type: int
+ purge_policies:
+ description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ default: true
+ state:
+ description:
+ - Create or remove the IAM role.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ create_instance_profile:
+ description:
+ - Creates an IAM instance profile along with the role.
+ default: true
+ type: bool
+ delete_instance_profile:
+ description:
+ - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
+ profile created with the same I(name) as the role.
+ - Only applies when I(state=absent).
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - How long (in seconds) to wait for creation / update to complete.
+ default: 120
+ type: int
+ wait:
+ description:
+ - When I(wait=True) the module will wait for up to I(wait_timeout) seconds
+ for IAM role creation before returning.
+ default: True
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a role with description and tags
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ description: This is My New Role
+ tags:
+ env: dev
+
+- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+- name: Keep the role created above but remove all managed policies
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies: []
+
+- name: Delete the role
+ community.aws.iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
+ state: absent
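+
+# A sketch based on the boundary and create_instance_profile options documented
+# above; the boundary policy ARN is a placeholder. Boundaries cannot be set on
+# instance profiles, so instance profile creation is disabled.
+- name: Create a role with a permissions boundary and no instance profile
+  community.aws.iam_role:
+    name: mynewrole
+    assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+    boundary: arn:aws:iam::123456789012:policy/boundary-policy
+    create_instance_profile: false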
+
+'''
+RETURN = r'''
+iam_role:
+ description: dictionary containing the IAM Role data
+ returned: success
+ type: complex
+ contains:
+ path:
+ description: the path to the role
+ type: str
+ returned: always
+ sample: /
+ role_name:
+ description: the friendly name that identifies the role
+ type: str
+ returned: always
+ sample: myrole
+ role_id:
+ description: the stable and unique string identifying the role
+ type: str
+ returned: always
+ sample: ABCDEFF4EZ4ABCDEFV4ZC
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the role
+ type: str
+ returned: always
+ sample: "arn:aws:iam::1234567890:role/mynewrole"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the role was created
+ type: str
+ returned: always
+ sample: "2016-08-14T04:36:28+00:00"
+ assume_role_policy_document:
+ description:
+ - the policy that grants an entity permission to assume the role
+      - |
+        note: the case of keys in this dictionary is currently converted from CamelCase to
+        snake_case. In a release after 2023-12-01 this behaviour will change.
+ type: dict
+ returned: always
+ sample: {
+ 'statement': [
+ {
+ 'action': 'sts:AssumeRole',
+ 'effect': 'Allow',
+ 'principal': {
+ 'service': 'ec2.amazonaws.com'
+ },
+ 'sid': ''
+ }
+ ],
+ 'version': '2012-10-17'
+ }
+ assume_role_policy_document_raw:
+ description: the policy that grants an entity permission to assume the role
+ type: dict
+ returned: always
+ version_added: 5.3.0
+ sample: {
+ 'Statement': [
+ {
+ 'Action': 'sts:AssumeRole',
+ 'Effect': 'Allow',
+ 'Principal': {
+ 'Service': 'ec2.amazonaws.com'
+ },
+ 'Sid': ''
+ }
+ ],
+ 'Version': '2012-10-17'
+ }
+
+ attached_policies:
+ description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
+ type: list
+ returned: always
+ sample: [
+ {
+ 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
+ 'policy_name': 'PowerUserAccess'
+ }
+ ]
+ tags:
+ description: role tags
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+@AWSRetry.jittered_backoff()
+def _list_policies(client):
+ paginator = client.get_paginator('list_policies')
+ return paginator.paginate().build_full_result()['Policies']
+
+
+def wait_iam_exists(module, client):
+ if module.check_mode:
+ return
+ if not module.params.get('wait'):
+ return
+
+ role_name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
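+    # Poll roughly every 5 seconds (or faster for very short timeouts) until
+    # wait_timeout is consumed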
+ delay = min(wait_timeout, 5)
+ max_attempts = wait_timeout // delay
+
+ try:
+ waiter = client.get_waiter('role_exists')
+ waiter.wait(
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
+ RoleName=role_name,
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed while waiting on IAM role creation')
+
+
+def convert_friendly_names_to_arns(module, client, policy_names):
+    if all(policy.startswith('arn:') for policy in policy_names):
+ return policy_names
+
+ allpolicies = {}
+ policies = _list_policies(client)
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json_aws(e, msg="Couldn't find policy")
+
+
+def attach_policies(module, client, policies_to_attach, role_name):
+ if module.check_mode and policies_to_attach:
+ return True
+
+ changed = False
+ for policy_arn in policies_to_attach:
+ try:
+ client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name))
+ return changed
+
+
+def remove_policies(module, client, policies_to_remove, role_name):
+ if module.check_mode and policies_to_remove:
+ return True
+
+ changed = False
+ for policy in policies_to_remove:
+ try:
+ client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True)
+ changed = True
+ except is_boto3_error_code('NoSuchEntityException'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name))
+ return changed
+
+
+def remove_inline_policies(module, client, role_name):
+ current_inline_policies = get_inline_policy_list(module, client, role_name)
+ for policy in current_inline_policies:
+ try:
+ client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True)
+ except is_boto3_error_code('NoSuchEntityException'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name))
+
+
+def generate_create_params(module):
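+ """Build the keyword arguments for create_role from the module parameters."""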
+ params = dict()
+ params['Path'] = module.params.get('path')
+ params['RoleName'] = module.params.get('name')
+ params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
+ if module.params.get('description') is not None:
+ params['Description'] = module.params.get('description')
+ if module.params.get('max_session_duration') is not None:
+ params['MaxSessionDuration'] = module.params.get('max_session_duration')
+ if module.params.get('boundary') is not None:
+ params['PermissionsBoundary'] = module.params.get('boundary')
+ if module.params.get('tags') is not None:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+
+ return params
+
+
+def create_basic_role(module, client):
+ """
+ Perform the Role creation.
+ Assumes tests for the role existing have already been performed.
+ """
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ params = generate_create_params(module)
+ role = client.create_role(aws_retry=True, **params)
+ # 'Description' is documented as a key of the role returned by create_role,
+ # but it is not actually returned; this appears to be an AWS bug (the AWS
+ # CLI does not return the value either). Fetch the role again after creating it.
+ role = get_role_with_backoff(module, client, params['RoleName'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create role")
+
+ return role
+
+
+def update_role_assumed_policy(module, client, role_name, target_assumed_policy, current_assumed_policy):
+ # Check Assumed Policy document
+ if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)):
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.update_assume_role_policy(
+ RoleName=role_name,
+ PolicyDocument=target_assumed_policy,
+ aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name))
+ return True
+
+
+def update_role_description(module, client, role_name, target_description, current_description):
+ # Check Description update
+ if target_description is None or current_description == target_description:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.update_role(RoleName=role_name, Description=target_description, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name))
+ return True
+
+
+def update_role_max_session_duration(module, client, role_name, target_duration, current_duration):
+ # Check MaxSessionDuration update
+ if target_duration is None or current_duration == target_duration:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name))
+ return True
+
+
+def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary):
+ # Check PermissionsBoundary
+ if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary:
+ return False
+
+ if module.check_mode:
+ return True
+
+ if target_permissions_boundary == '':
+ try:
+ client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name))
+ else:
+ try:
+ client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name))
+ return True
+
+
+def update_managed_policies(module, client, role_name, managed_policies, purge_policies):
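+ """Attach requested managed policies and, when purging, detach any others."""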
+ # Check Managed Policies
+ if managed_policies is None:
+ return False
+
+ # Get list of current attached managed policies
+ current_attached_policies = get_attached_policy_list(module, client, role_name)
+ current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
+
+ if len(managed_policies) == 1 and managed_policies[0] is None:
+ managed_policies = []
+
+ policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
+ policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
+
+ changed = False
+ if purge_policies and policies_to_remove:
+ if module.check_mode:
+ return True
+ else:
+ changed |= remove_policies(module, client, policies_to_remove, role_name)
+
+ if policies_to_attach:
+ if module.check_mode:
+ return True
+ else:
+ changed |= attach_policies(module, client, policies_to_attach, role_name)
+
+ return changed
+
+
+def create_or_update_role(module, client):
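+ """Create the role if it is absent, otherwise reconcile its attributes, and exit with the result."""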
+
+ role_name = module.params.get('name')
+ assumed_policy = module.params.get('assume_role_policy_document')
+ create_instance_profile = module.params.get('create_instance_profile')
+ description = module.params.get('description')
+ duration = module.params.get('max_session_duration')
+ path = module.params.get('path')
+ permissions_boundary = module.params.get('boundary')
+ purge_tags = module.params.get('purge_tags')
+ tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None
+ purge_policies = module.params.get('purge_policies')
+ managed_policies = module.params.get('managed_policies')
+ if managed_policies:
+ # Attempt to list the policies early so we don't leave things behind if we can't find them.
+ managed_policies = convert_friendly_names_to_arns(module, client, managed_policies)
+
+ changed = False
+
+ # Get role
+ role = get_role(module, client, role_name)
+
+ # If role is None, create it
+ if role is None:
+ role = create_basic_role(module, client)
+
+ if not module.check_mode and module.params.get('wait'):
+ wait_iam_exists(module, client)
+
+ changed = True
+ else:
+ # Role exists - get current attributes
+ current_assumed_policy = role.get('AssumeRolePolicyDocument')
+ current_description = role.get('Description')
+ current_duration = role.get('MaxSessionDuration')
+ current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '')
+
+ # Update attributes
+ changed |= update_role_tags(module, client, role_name, tags, purge_tags)
+ changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy)
+ changed |= update_role_description(module, client, role_name, description, current_description)
+ changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration)
+ changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary)
+
+ if not module.check_mode and module.params.get('wait'):
+ wait_iam_exists(module, client)
+
+ if create_instance_profile:
+ changed |= create_instance_profiles(module, client, role_name, path)
+
+ if not module.check_mode and module.params.get('wait'):
+ wait_iam_exists(module, client)
+
+ changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies)
+ wait_iam_exists(module, client)
+
+ # Get the role again
+ role = get_role(module, client, role_name)
+ role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name)
+ role['tags'] = get_role_tags(module, client)
+
+ camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags'])
+ camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {})
+ module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
+
+
+def create_instance_profiles(module, client, role_name, path):
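+ """Ensure an instance profile named after the role exists and contains the role."""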
+
+ # Fetch existing Profiles
+ try:
+ instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
+
+ # Profile already exists
+ if any(p['InstanceProfileName'] == role_name for p in instance_profiles):
+ return False
+
+ if module.check_mode:
+ return True
+
+ # Make sure an instance profile is created
+ try:
+ client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True)
+ except is_boto3_error_code('EntityAlreadyExists'):
+ # If the profile already exists, no problem, move on.
+ # Implies someone's changing things at the same time...
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name))
+
+ # And attach the role to the profile
+ try:
+ client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name))
+
+ return True
+
+
+def remove_instance_profiles(module, client, role_name):
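+ """Remove the role from its instance profiles, deleting the profile named after the role if requested."""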
+ delete_profiles = module.params.get("delete_instance_profile")
+
+ try:
+ instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
+
+ # Remove the role from the instance profile(s)
+ for profile in instance_profiles:
+ profile_name = profile['InstanceProfileName']
+ try:
+ if not module.check_mode:
+ client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name)
+ if profile_name == role_name:
+ if delete_profiles:
+ try:
+ client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
+ except is_boto3_error_code('NoSuchEntityException'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
+
+
+def destroy_role(module, client):
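+ """Delete the role after removing its instance profiles, managed policies and inline policies."""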
+
+ role_name = module.params.get('name')
+ role = get_role(module, client, role_name)
+
+ if role is None:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # Before we try to delete the role we need to remove any
+ # - attached instance profiles
+ # - attached managed policies
+ # - embedded inline policies
+ remove_instance_profiles(module, client, role_name)
+ update_managed_policies(module, client, role_name, [], True)
+ remove_inline_policies(module, client, role_name)
+ try:
+ client.delete_role(aws_retry=True, RoleName=role_name)
+ except is_boto3_error_code('NoSuchEntityException'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete role")
+
+ module.exit_json(changed=True)
+
+
+def get_role_with_backoff(module, client, name):
+ try:
+ return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_role(module, client, name):
+ try:
+ return client.get_role(RoleName=name, aws_retry=True)['Role']
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_attached_policy_list(module, client, name):
+ try:
+ return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
+
+
+def get_inline_policy_list(module, client, name):
+ try:
+ return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
+
+
+def get_role_tags(module, client):
+ role_name = module.params.get('name')
+ try:
+ return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
+
+
+def update_role_tags(module, client, role_name, new_tags, purge_tags):
+ if new_tags is None:
+ return False
+ new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+
+ try:
+ existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError):
+ existing_tags = {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ try:
+ if tags_to_remove:
+ client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+ client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
+
+ changed = bool(tags_to_add) or bool(tags_to_remove)
+ return changed
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ path=dict(type='str', default="/"),
+ assume_role_policy_document=dict(type='json'),
+ managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'),
+ max_session_duration=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ description=dict(type='str'),
+ boundary=dict(type='str', aliases=['boundary_policy_arn']),
+ create_instance_profile=dict(type='bool', default=True),
+ delete_instance_profile=dict(type='bool', default=False),
+ purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=120, type='int'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['assume_role_policy_document'])],
+ supports_check_mode=True)
+
+ module.deprecate("All return values other than iam_role and changed have been deprecated and "
+ "will be removed in a release after 2023-12-01.",
+ date="2023-12-01", collection_name="community.aws")
+
+ module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document "
+ "will no longer be converted from CamelCase to snake_case. The "
+ "iam_role.assume_role_policy_document_raw return value already returns the "
+ "policy document in this future format.",
+ date="2023-12-01", collection_name="community.aws")
+
+ if module.params.get('boundary'):
+ if module.params.get('create_instance_profile'):
+ module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
+ if not module.params.get('boundary').startswith('arn:'):
+ module.fail_json(msg="Boundary policy must be an ARN")
+ if module.params.get('max_session_duration'):
+ max_session_duration = module.params.get('max_session_duration')
+ if max_session_duration < 3600 or max_session_duration > 43200:
+ module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
+ if module.params.get('path'):
+ path = module.params.get('path')
+ if not path.endswith('/') or not path.startswith('/'):
+ module.fail_json(msg="path must begin and end with /")
+
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_role(module, client)
+ elif state == 'absent':
+ destroy_role(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_role_info.py b/ansible_collections/community/aws/plugins/modules/iam_role_info.py
new file mode 100644
index 000000000..d66be487a
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_role_info.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_role_info
+version_added: 1.0.0
+short_description: Gather information on IAM roles
+description:
+ - Gathers information about IAM roles.
+author:
+ - "Will Thames (@willthames)"
+options:
+ name:
+ description:
+ - Name of a role to search for.
+ - Mutually exclusive with I(path_prefix).
+ aliases:
+ - role_name
+ type: str
+ path_prefix:
+ description:
+ - Path prefix to restrict the IAM role search to.
+ - Mutually exclusive with I(name).
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: find all existing IAM roles
+ community.aws.iam_role_info:
+ register: result
+
+- name: describe a single role
+ community.aws.iam_role_info:
+ name: MyIAMRole
+
+- name: describe all roles matching a path prefix
+ community.aws.iam_role_info:
+ path_prefix: /application/path
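+
+# A further illustrative example: register the result and inspect the attached
+# managed policies (assumes a role named MyIAMRole exists).
+- name: describe a single role and register the result
+ community.aws.iam_role_info:
+ name: MyIAMRole
+ register: role_info
+
+- name: show the managed policies attached to the role
+ ansible.builtin.debug:
+ msg: "{{ role_info.iam_roles | map(attribute='managed_policies') | list }}"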
+'''
+
+RETURN = '''
+iam_roles:
+ description: List of IAM roles
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for IAM role.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:role/AnsibleTestRole
+ assume_role_policy_document:
+ description:
+ - The policy that grants an entity permission to assume the role.
+ - |
+ Note: the keys in this dictionary are currently converted from CamelCase to
+ snake_case. In a release after 2023-12-01 this behaviour will change.
+ returned: always
+ type: dict
+ assume_role_policy_document_raw:
+ description: The policy document describing what can assume the role.
+ returned: always
+ type: dict
+ version_added: 5.3.0
+ create_date:
+ description: Date IAM role was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ inline_policies:
+ description: List of names of inline policies.
+ returned: always
+ type: list
+ sample: []
+ managed_policies:
+ description: List of attached managed policies.
+ returned: always
+ type: complex
+ contains:
+ policy_arn:
+ description: Amazon Resource Name for the policy.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
+ policy_name:
+ description: Name of managed policy.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ instance_profiles:
+ description: List of attached instance profiles.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: Amazon Resource Name for the instance profile.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
+ create_date:
+ description: Date instance profile was created.
+ returned: always
+ type: str
+ sample: '2017-10-23T00:05:08+00:00'
+ instance_profile_id:
+ description: Amazon Identifier for the instance profile.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ instance_profile_name:
+ description: Name of instance profile.
+ returned: always
+ type: str
+ sample: AnsibleTestEC2Policy
+ path:
+ description: Path of instance profile.
+ returned: always
+ type: str
+ sample: /
+ roles:
+ description: List of roles associated with this instance profile.
+ returned: always
+ type: list
+ sample: []
+ path:
+ description: Path of role.
+ returned: always
+ type: str
+ sample: /
+ role_id:
+ description: Amazon Identifier for the role.
+ returned: always
+ type: str
+ sample: AROAII7ABCD123456EFGH
+ role_name:
+ description: Name of the role.
+ returned: always
+ type: str
+ sample: AnsibleTestRole
+ tags:
+ description: Role tags.
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff()
+def list_iam_roles_with_backoff(client, **kwargs):
+ paginator = client.get_paginator('list_roles')
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+@AWSRetry.jittered_backoff()
+def list_iam_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames']
+
+
+@AWSRetry.jittered_backoff()
+def list_iam_attached_role_policies_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_attached_role_policies')
+ return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies']
+
+
+@AWSRetry.jittered_backoff()
+def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
+ paginator = client.get_paginator('list_instance_profiles_for_role')
+ return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles']
+
+
+def describe_iam_role(module, client, role):
+ name = role['RoleName']
+ try:
+ role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name)
+ try:
+ role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name)
+ try:
+ role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name)
+ try:
+ role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags'])
+ del role['Tags']
+ except KeyError:
+ role['tags'] = {}
+ return role
+
+
+def describe_iam_roles(module, client):
+ name = module.params['name']
+ path_prefix = module.params['path_prefix']
+ if name:
+ try:
+ roles = [client.get_role(RoleName=name, aws_retry=True)['Role']]
+ except is_boto3_error_code('NoSuchEntity'):
+ return []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+ else:
+ params = dict()
+ if path_prefix:
+ if not path_prefix.startswith('/'):
+ path_prefix = '/' + path_prefix
+ if not path_prefix.endswith('/'):
+ path_prefix = path_prefix + '/'
+ params['PathPrefix'] = path_prefix
+ try:
+ roles = list_iam_roles_with_backoff(client, **params)['Roles']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list IAM roles")
+ return [normalize_role(describe_iam_role(module, client, role)) for role in roles]
+
+
+def normalize_profile(profile):
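+ """Return a snake_case copy of an instance profile, normalizing any nested roles."""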
+ new_profile = camel_dict_to_snake_dict(profile)
+ if profile.get("Roles"):
+ profile["roles"] = [normalize_role(role) for role in profile.get("Roles")]
+ return new_profile
+
+
+def normalize_role(role):
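+ """Return a snake_case copy of a role, preserving the raw assume role policy document."""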
+ new_role = camel_dict_to_snake_dict(role, ignore_list=['tags'])
+ new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument")
+ if role.get("InstanceProfiles"):
+ role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")]
+ return new_role
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = dict(
+ name=dict(aliases=['role_name']),
+ path_prefix=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['name', 'path_prefix']])
+
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document "
+ "will no longer be converted from CamelCase to snake_case. The "
+ ".assume_role_policy_document_raw return value already returns the "
+ "policy document in this future format.",
+ date="2023-12-01", collection_name="community.aws")
+
+ module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
new file mode 100644
index 000000000..f79e4c2c6
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_saml_federation
+version_added: 1.0.0
+short_description: Maintain IAM SAML federation configuration
+description:
+ - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata).
+options:
+ name:
+ description:
+ - The name of the provider to create.
+ required: true
+ type: str
+ saml_metadata_document:
+ description:
+ - The XML document generated by an identity provider (IdP) that supports SAML 2.0.
+ type: str
+ state:
+ description:
+ - Whether to create or delete the identity provider. If C(present) is specified, it will attempt to update the identity provider matching the name field.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+author:
+ - Tony (@axc450)
+ - Aidan Rowe (@aidan-)
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# It is assumed that their matching environment variables are set.
+# Creates a new iam saml identity provider if not present
+- name: saml provider
+ community.aws.iam_saml_federation:
+ name: example1
+ # the > below opens a folded block, so no escaping/quoting is needed inside the indented block under this key
+ saml_metadata_document: >
+ <?xml version="1.0"?>...
+ <md:EntityDescriptor
+# Creates a new iam saml identity provider if not present
+- name: saml provider
+ community.aws.iam_saml_federation:
+ name: example2
+ saml_metadata_document: "{{ item }}"
+ with_file: /path/to/idp/metadata.xml
+# Removes iam saml identity provider
+- name: remove saml provider
+ community.aws.iam_saml_federation:
+ name: example3
+ state: absent
+'''
+
+RETURN = '''
+saml_provider:
+ description: Details of the SAML Identity Provider that was created/modified.
+ type: complex
+ returned: present
+ contains:
+ arn:
+ description: The ARN of the identity provider.
+ type: str
+ returned: present
+ sample: "arn:aws:iam::123456789012:saml-provider/my_saml_provider"
+ metadata_document:
+ description: The XML metadata document that includes information about an identity provider.
+ type: str
+ returned: present
+ create_date:
+ description: The date and time when the SAML provider was created in ISO 8601 date-time format.
+ type: str
+ returned: present
+ sample: "2017-02-08T04:36:28+00:00"
+ expire_date:
+ description: The expiration date and time for the SAML provider in ISO 8601 date-time format.
+ type: str
+ returned: present
+ sample: "2017-02-08T04:36:28+00:00"
+'''
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class SAMLProviderManager:
+ """Handles SAML Identity Provider configuration"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ self.conn = module.client('iam')
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Unknown AWS SDK error")
+
+ # use retry decorator for boto3 calls
+ @AWSRetry.jittered_backoff(retries=3, delay=5)
+ def _list_saml_providers(self):
+ return self.conn.list_saml_providers()
+
+ @AWSRetry.jittered_backoff(retries=3, delay=5)
+ def _get_saml_provider(self, arn):
+ return self.conn.get_saml_provider(SAMLProviderArn=arn)
+
+ @AWSRetry.jittered_backoff(retries=3, delay=5)
+ def _update_saml_provider(self, arn, metadata):
+ return self.conn.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata)
+
+ @AWSRetry.jittered_backoff(retries=3, delay=5)
+ def _create_saml_provider(self, metadata, name):
+ return self.conn.create_saml_provider(SAMLMetadataDocument=metadata, Name=name)
+
+ @AWSRetry.jittered_backoff(retries=3, delay=5)
+ def _delete_saml_provider(self, arn):
+ return self.conn.delete_saml_provider(SAMLProviderArn=arn)
+
+ def _get_provider_arn(self, name):
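+ """Return the ARN of the SAML provider with the given name, or None if it does not exist."""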
+ providers = self._list_saml_providers()
+ for p in providers['SAMLProviderList']:
+ provider_name = p['Arn'].split('/', 1)[1]
+ if name == provider_name:
+ return p['Arn']
+
+ return None
+
+ def create_or_update_saml_provider(self, name, metadata):
+ if not metadata:
+ self.module.fail_json(msg="saml_metadata_document must be defined for present state")
+
+ res = {'changed': False}
+ try:
+ arn = self._get_provider_arn(name)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+
+ if arn: # see if metadata needs updating
+ try:
+ resp = self._get_saml_provider(arn)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name))
+
+ if metadata.strip() != resp['SAMLMetadataDocument'].strip():
+ # provider needs updating
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ resp = self._update_saml_provider(arn, metadata)
+ res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name))
+ else:
+ res['saml_provider'] = self._build_res(arn)
+
+ else: # create
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ resp = self._create_saml_provider(metadata, name)
+ res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name))
+
+ self.module.exit_json(**res)
+
+ def delete_saml_provider(self, name):
+ res = {'changed': False}
+ try:
+ arn = self._get_provider_arn(name)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+
+ if arn: # delete
+ res['changed'] = True
+ if not self.module.check_mode:
+ try:
+ self._delete_saml_provider(arn)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name))
+
+ self.module.exit_json(**res)
+
+ def _build_res(self, arn):
+ saml_provider = self._get_saml_provider(arn)
+ return {
+ "arn": arn,
+ "metadata_document": saml_provider["SAMLMetadataDocument"],
+ "create_date": saml_provider["CreateDate"].isoformat(),
+ "expire_date": saml_provider["ValidUntil"].isoformat()
+ }
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ saml_metadata_document=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['saml_metadata_document'])]
+ )
+
+ name = module.params['name']
+ state = module.params.get('state')
+ saml_metadata_document = module.params.get('saml_metadata_document')
+
+ sp_man = SAMLProviderManager(module)
+
+ if state == 'present':
+ sp_man.create_or_update_saml_provider(name, saml_metadata_document)
+ elif state == 'absent':
+ sp_man.delete_saml_provider(name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
new file mode 100644
index 000000000..f3d5c5808
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate
+version_added: 1.0.0
+short_description: Manage IAM server certificates for use on ELBs and CloudFront
+description:
+ - Allows for the management of IAM server certificates.
+options:
+ name:
+ description:
+ - Name of certificate to add, update or remove.
+ required: true
+ type: str
+ new_name:
+ description:
+ - When I(state=present), this will update the name of the cert.
+ - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+ type: str
+ new_path:
+ description:
+ - When I(state=present), this will update the path of the cert.
+ - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+ type: str
+ state:
+ description:
+ - Whether to create (or update) or delete the certificate.
+ - If I(new_path) or I(new_name) is defined, specifying C(present) will attempt to update these.
+ required: true
+ choices: [ "present", "absent" ]
+ type: str
+ path:
+ description:
+ - When creating or updating, specify the desired path of the certificate.
+ default: "/"
+ type: str
+ cert_chain:
+ description:
+ - The content of the CA certificate chain in PEM encoded format.
+ type: str
+ cert:
+ description:
+ - The content of the certificate body in PEM encoded format.
+ type: str
+ key:
+ description:
+ - The content of the private key in PEM encoded format.
+ type: str
+ dup_ok:
+ description:
+ - By default the module will not upload a certificate that is already uploaded into AWS.
+ - If I(dup_ok=True), it will upload the certificate as long as the name is unique.
+ - The default value of this option changed to C(true) in release 5.0.0.
+ default: true
+ type: bool
+
+author:
+ - Jonathan I. Davila (@defionscode)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Basic server certificate upload from local file
+ community.aws.iam_server_certificate:
+ name: very_ssl
+ state: present
+ cert: "{{ lookup('file', 'path/to/cert') }}"
+ key: "{{ lookup('file', 'path/to/key') }}"
+ cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
+
+- name: Server certificate upload using key string
+ community.aws.iam_server_certificate:
+ name: very_ssl
+ state: present
+ path: "/a/cert/path/"
+ cert: "{{ lookup('file', 'path/to/cert') }}"
+ key: "{{ lookup('file', 'path/to/key') }}"
+ cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
+
+- name: Basic rename of existing certificate
+ community.aws.iam_server_certificate:
+ name: very_ssl
+ new_name: new_very_ssl
+ state: present
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff()
+def _list_server_certificates():
+ paginator = client.get_paginator('list_server_certificates')
+ return paginator.paginate().build_full_result()['ServerCertificateMetadataList']
+
+
+def check_duplicate_cert(new_cert):
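+ """Fail the module if an identical certificate body is already uploaded under another name."""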
+ orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certificates())
+ for cert_name in orig_cert_names:
+ cert = get_server_certificate(cert_name)
+ if not cert:
+ continue
+ cert_body = cert.get('certificate_body', None)
+ if not _compare_cert(new_cert, cert_body):
+ continue
+ module.fail_json(
+ changed=False,
+ msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name),
+ duplicate_cert=cert,
+ )
+
+
+def _compare_cert(cert_a, cert_b):
+ if not cert_a and not cert_b:
+ return True
+ if not cert_a or not cert_b:
+ return False
+ # Trim out the whitespace before comparing the certs. While this could mean
+ # an invalid cert 'matches' a valid cert, that's better than some stray
+ # whitespace breaking things
+ cert_a = cert_a.replace('\r', '').replace('\n', '').replace(' ', '')
+ cert_b = cert_b.replace('\r', '').replace('\n', '').replace(' ', '')
+
+ return cert_a == cert_b
+
+
+def update_server_certificate(current_cert):
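+ """Fail if the certificate body or chain differ; AWS cannot modify them in place."""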
+ changed = False
+ cert = module.params.get('cert')
+ cert_chain = module.params.get('cert_chain')
+
+ if not _compare_cert(cert, current_cert.get('certificate_body', None)):
+ module.fail_json(msg='Modifying the certificate body is not supported by AWS')
+ if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)):
+ module.fail_json(msg='Modifying the chaining certificate is not supported by AWS')
+ # We can't compare keys.
+
+ if module.check_mode:
+ return changed
+
+ # For now we can't make any changes. Updates to tagging would go here and
+ # update 'changed'
+
+ return changed
+
+
+def create_server_certificate():
+ cert = module.params.get('cert')
+ key = module.params.get('key')
+ cert_chain = module.params.get('cert_chain')
+
+ if not module.params.get('dup_ok'):
+ check_duplicate_cert(cert)
+
+ path = module.params.get('path')
+ name = module.params.get('name')
+
+ params = dict(
+ ServerCertificateName=name,
+ CertificateBody=cert,
+ PrivateKey=key,
+ )
+
+ if cert_chain:
+ params['CertificateChain'] = cert_chain
+ if path:
+ params['Path'] = path
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.upload_server_certificate(
+ aws_retry=True,
+ **params
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to upload server certificate {0}'.format(name))
+
+ return True
+
+
+def rename_server_certificate(current_cert):
+ name = module.params.get('name')
+ new_name = module.params.get('new_name')
+ new_path = module.params.get('new_path')
+
+ changes = dict()
+
+ # Try to be nice; if we've already been renamed, exit quietly.
+ if not current_cert:
+ current_cert = get_server_certificate(new_name)
+ else:
+ if new_name:
+ changes['NewServerCertificateName'] = new_name
+
+ if not current_cert:
+ module.fail_json(msg='Unable to find certificate {0}'.format(name))
+
+ cert_metadata = current_cert.get('server_certificate_metadata', {})
+
+ current_path = cert_metadata.get('path', None)
+ if new_path and current_path != new_path:
+ changes['NewPath'] = new_path
+
+ if not changes:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.update_server_certificate(
+ aws_retry=True,
+ ServerCertificateName=name,
+ **changes
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name),
+ changes=changes)
+
+ return True
+
+
+def delete_server_certificate(current_cert):
+ if not current_cert:
+ return False
+
+ if module.check_mode:
+ return True
+
+ name = module.params.get('name')
+
+ try:
+ result = client.delete_server_certificate(
+ aws_retry=True,
+ ServerCertificateName=name,
+ )
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name))
+
+ return True
+
+
+def get_server_certificate(name):
+ if not name:
+ return None
+ try:
+ result = client.get_server_certificate(
+ aws_retry=True,
+ ServerCertificateName=name,
+ )
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name))
+ cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate')))
+ return cert
+
+
+def compatibility_results(current_cert):
+ compat_results = dict()
+
+ if not current_cert:
+ return compat_results
+
+ metadata = current_cert.get('server_certificate_metadata', {})
+
+ if current_cert.get('certificate_body', None):
+ compat_results['cert_body'] = current_cert.get('certificate_body')
+ if current_cert.get('certificate_chain', None):
+ compat_results['chain_cert_body'] = current_cert.get('certificate_chain')
+ if metadata.get('arn', None):
+ compat_results['arn'] = metadata.get('arn')
+ if metadata.get('expiration', None):
+ compat_results['expiration_date'] = metadata.get('expiration')
+ if metadata.get('path', None):
+ compat_results['cert_path'] = metadata.get('path')
+ if metadata.get('server_certificate_name', None):
+ compat_results['name'] = metadata.get('server_certificate_name')
+ if metadata.get('upload_date', None):
+ compat_results['upload_date'] = metadata.get('upload_date')
+
+ return compat_results
+
+
+def main():
+
+ global module
+ global client
+
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ cert=dict(),
+ key=dict(no_log=True),
+ cert_chain=dict(),
+ new_name=dict(),
+ path=dict(default='/'),
+ new_path=dict(),
+ dup_ok=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['new_path', 'key'],
+ ['new_path', 'cert'],
+ ['new_path', 'cert_chain'],
+ ['new_name', 'key'],
+ ['new_name', 'cert'],
+ ['new_name', 'cert_chain'],
+ ],
+ supports_check_mode=True,
+ )
+
+ client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+ path = module.params.get('path')
+ new_name = module.params.get('new_name')
+ new_path = module.params.get('new_path')
+ dup_ok = module.params.get('dup_ok')
+
+ current_cert = get_server_certificate(name)
+
+ results = dict()
+ if state == 'absent':
+ changed = delete_server_certificate(current_cert)
+ if changed:
+ results['deleted_cert'] = name
+ else:
+ msg = 'Certificate with the name {0} is already absent'.format(name)
+ results['msg'] = msg
+ else:
+ if new_name or new_path:
+ changed = rename_server_certificate(current_cert)
+ if new_name:
+ name = new_name
+ updated_cert = get_server_certificate(name)
+ elif current_cert:
+ changed = update_server_certificate(current_cert)
+ updated_cert = get_server_certificate(name)
+ else:
+ changed = create_server_certificate()
+ updated_cert = get_server_certificate(name)
+
+ results['server_certificate'] = updated_cert
+ compat_results = compatibility_results(updated_cert)
+ if compat_results:
+ results.update(compat_results)
+
+ module.exit_json(
+ changed=changed,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
new file mode 100644
index 000000000..ee0dc590d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_info
+version_added: 1.0.0
+short_description: Retrieve the information of a server certificate
+description:
+ - Retrieve the attributes of a server certificate.
+author: "Allen Sanabria (@linuxdynasty)"
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Retrieve server certificate
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+
+- name: Fail if the server certificate name was not found
+ community.aws.iam_server_certificate_info:
+ name: production-cert
+ register: server_cert
+ failed_when: server_cert.results | length == 0
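+
+# An illustrative example: omitting name returns every certificate in the account.
+- name: Retrieve all server certificates
+ community.aws.iam_server_certificate_info:
+ register: all_server_certs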
+'''
+
+RETURN = '''
+server_certificate_id:
+ description: The 21-character certificate ID.
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+ description: The PEM encoded certificate body.
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::123456789012:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import botocore
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def get_server_certs(iam, name=None):
+ """Retrieve the attributes of a server certificate if it exists or all certs.
+ Args:
+ iam (botocore.client.IAM): The boto3 iam instance.
+
+ Kwargs:
+ name (str): The name of the server certificate.
+
+ Basic Usage:
+ >>> import boto3
+ >>> iam = boto3.client('iam')
+ >>> name = "server-cert-name"
+ >>> results = get_server_certs(iam, name)
+ {
+ "upload_date": "2015-04-25T00:36:40+00:00",
+ "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+ "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+ "server_certificate_name": "server-cert-name",
+ "expiration": "2017-06-15T12:00:00+00:00",
+ "path": "/",
+ "arn": "arn:aws:iam::123456789012:server-certificate/server-cert-name"
+ }
+ """
+ results = dict()
+ try:
+ if name:
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ else:
+ server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+ for server_cert in server_certs:
+ if not name:
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+ cert_md = server_cert['ServerCertificateMetadata']
+ results[cert_md['ServerCertificateName']] = {
+ 'certificate_body': server_cert['CertificateBody'],
+ 'server_certificate_id': cert_md['ServerCertificateId'],
+ 'server_certificate_name': cert_md['ServerCertificateName'],
+ 'arn': cert_md['Arn'],
+ 'path': cert_md['Path'],
+ 'expiration': cert_md['Expiration'].isoformat(),
+ 'upload_date': cert_md['UploadDate'].isoformat(),
+ }
+
+ except botocore.exceptions.ClientError:
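+ # The certificate was not found or is not accessible; return what was collected so far.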
+ pass
+
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ iam = module.client('iam')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ cert_name = module.params.get('name')
+ results = get_server_certs(iam, cert_name)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/inspector_target.py b/ansible_collections/community/aws/plugins/modules/inspector_target.py
new file mode 100644
index 000000000..2ec9e9a0e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/inspector_target.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Dennis Conrad for Sainsbury's
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: inspector_target
+version_added: 1.0.0
+short_description: Create, Update and Delete Amazon Inspector Assessment Targets
+description:
+ - Creates, updates, or deletes Amazon Inspector Assessment Targets and manages
+ the required Resource Groups.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_inspector_target).
+ The usage did not change.
+author:
+ - "Dennis Conrad (@dennisconrad)"
+options:
+ name:
+ description:
+ - The user-defined name that identifies the assessment target.
+ - The name must be unique within the AWS account.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of the assessment target.
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ tags:
+ description:
+ - Tags of the EC2 instances to be added to the assessment target.
+ - Required if I(state=present).
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create my_target Assessment Target
+ community.aws.inspector_target:
+ name: my_target
+ tags:
+ role: scan_target
+
+- name: Update Existing my_target Assessment Target with Additional Tags
+ community.aws.inspector_target:
+ name: my_target
+ tags:
+ env: dev
+ role: scan_target
+
+- name: Delete my_target Assessment Target
+ community.aws.inspector_target:
+ name: my_target
+ state: absent
+'''
+
+RETURN = '''
+arn:
+ description: The ARN that specifies the Amazon Inspector assessment target.
+ returned: success
+ type: str
+ sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1"
+created_at:
+ description: The time at which the assessment target was created.
+ returned: success
+ type: str
+ sample: "2018-01-29T13:48:51.958000+00:00"
+name:
+ description: The name of the Amazon Inspector assessment target.
+ returned: success
+ type: str
+ sample: "my_target"
+resource_group_arn:
+ description: The ARN that specifies the resource group that is associated
+ with the assessment target.
+ returned: success
+ type: str
+ sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8"
+tags:
+ description: The tags of the resource group that is associated with the
+ assessment target.
+ returned: success
+ type: dict
+ sample: {"role": "scan_target", "env": "dev"}
+updated_at:
+ description: The time at which the assessment target was last updated.
+ returned: success
+ type: str
+ sample: "2018-01-29T13:48:51.958000+00:00"
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ ansible_dict_to_boto3_tag_list,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+ compare_aws_tags,
+)
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
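+# Note: the retry decorator below wraps the whole of main(), so a throttled AWS
+# API call re-executes the entire run rather than retrying the individual call.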
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ tags=dict(type='dict'),
+ )
+
+ required_if = [['state', 'present', ['tags']]]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ required_if=required_if,
+ )
+
+ name = module.params.get('name')
+ state = module.params.get('state').lower()
+ tags = module.params.get('tags')
+ if tags:
+ tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
+ client = module.client('inspector')
+
+ try:
+ existing_target_arn = client.list_assessment_targets(
+ filter={'assessmentTargetNamePattern': name},
+ ).get('assessmentTargetArns')[0]
+
+ existing_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[existing_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ existing_resource_group_arn = existing_target.get('resource_group_arn')
+ existing_resource_group_tags = client.describe_resource_groups(
+ resourceGroupArns=[existing_resource_group_arn],
+ ).get('resourceGroups')[0].get('tags')
+
+ target_exists = True
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to retrieve targets")
+ except IndexError:
+ target_exists = False
+
+ if state == 'present' and target_exists:
+ ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags)
+ ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(
+ existing_resource_group_tags
+ )
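+ # compare_aws_tags reports the tags that would need adding and the tag keys
+ # that would need removing; only their truthiness matters here, because any
+ # difference is handled by creating a fresh resource group rather than
+ # editing the existing one in place.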
+ tags_to_add, tags_to_remove = compare_aws_tags(
+ ansible_dict_tags,
+ ansible_dict_existing_tags
+ )
+ if not (tags_to_add or tags_to_remove):
+ existing_target.update({'tags': ansible_dict_existing_tags})
+ module.exit_json(changed=False, **existing_target)
+ else:
+ try:
+ updated_resource_group_arn = client.create_resource_group(
+ resourceGroupTags=tags,
+ ).get('resourceGroupArn')
+
+ client.update_assessment_target(
+ assessmentTargetArn=existing_target_arn,
+ assessmentTargetName=name,
+ resourceGroupArn=updated_resource_group_arn,
+ )
+
+ updated_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[existing_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ updated_target.update({'tags': ansible_dict_tags})
+ module.exit_json(changed=True, **updated_target)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to update target")
+
+ elif state == 'present' and not target_exists:
+ try:
+ new_resource_group_arn = client.create_resource_group(
+ resourceGroupTags=tags,
+ ).get('resourceGroupArn')
+
+ new_target_arn = client.create_assessment_target(
+ assessmentTargetName=name,
+ resourceGroupArn=new_resource_group_arn,
+ ).get('assessmentTargetArn')
+
+ new_target = camel_dict_to_snake_dict(
+ client.describe_assessment_targets(
+ assessmentTargetArns=[new_target_arn],
+ ).get('assessmentTargets')[0]
+ )
+
+ new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)})
+ module.exit_json(changed=True, **new_target)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to create target")
+
+ elif state == 'absent' and target_exists:
+ try:
+ client.delete_assessment_target(
+ assessmentTargetArn=existing_target_arn,
+ )
+ module.exit_json(changed=True)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, msg="trying to delete target")
+
+ elif state == 'absent' and not target_exists:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/kinesis_stream.py b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
new file mode 100644
index 000000000..e4c5d76df
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
@@ -0,0 +1,1262 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kinesis_stream
+version_added: 1.0.0
+short_description: Manage a Kinesis Stream.
+description:
+ - Create or Delete a Kinesis Stream.
+ - Update the retention period of a Kinesis Stream.
+ - Update Tags on a Kinesis Stream.
+ - Enable/disable server side encryption on a Kinesis Stream.
+author: Allen Sanabria (@linuxdynasty)
+options:
+ name:
+ description:
+ - The name of the Kinesis Stream you are managing.
+ required: true
+ type: str
+ shards:
+ description:
+ - The number of shards you want to have with this stream.
+ - This is required when I(state=present).
+ type: int
+ retention_period:
+ description:
+ - The length of time (in hours) data records are accessible after they are added to
+ the stream.
+ - The default retention period is 24 hours and can not be less than 24 hours.
+ - The maximum retention period is 168 hours.
+ - The retention period can be modified during any point in time.
+ type: int
+ state:
+ description:
+ - Create or Delete the Kinesis Stream.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ default: 300
+ type: int
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })."
+ aliases: [ "resource_tags" ]
+ type: dict
+ encryption_state:
+ description:
+ - Enable or Disable encryption on the Kinesis Stream.
+ choices: [ 'enabled', 'disabled' ]
+ type: str
+ encryption_type:
+ description:
+ - The type of encryption.
+ - Defaults to C(KMS).
+ choices: ['KMS', 'NONE']
+ type: str
+ key_id:
+ description:
+ - The GUID or alias for the KMS key.
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ shards: 10
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ shards: 10
+ tags:
+ Env: development
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
+ community.aws.kinesis_stream:
+ name: test-stream
+ retention_period: 48
+ shards: 10
+ tags:
+ Env: development
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+
+# Basic delete example:
+- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: absent
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+
+# Basic enable encryption example:
+- name: Encrypt Kinesis Stream test-stream.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: present
+ shards: 1
+ encryption_state: enabled
+ encryption_type: KMS
+ key_id: alias/aws/kinesis
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+
+# Basic disable encryption example:
+- name: Disable encryption on Kinesis Stream test-stream.
+ community.aws.kinesis_stream:
+ name: test-stream
+ state: present
+ shards: 1
+ encryption_state: disabled
+ encryption_type: KMS
+ key_id: alias/aws/kinesis
+ wait: true
+ wait_timeout: 600
+ register: test_stream
+'''
+
+RETURN = '''
+stream_name:
+ description: The name of the Kinesis Stream.
+ returned: when state == present.
+ type: str
+ sample: "test-stream"
+stream_arn:
+ description: The Amazon Resource Name (ARN) of the Kinesis Stream.
+ returned: when state == present.
+ type: str
+ sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
+stream_status:
+ description: The current state of the Kinesis Stream.
+ returned: when state == present.
+ type: str
+ sample: "ACTIVE"
+retention_period_hours:
+ description: Number of hours messages will be kept for a Kinesis Stream.
+ returned: when state == present.
+ type: int
+ sample: 24
+tags:
+ description: Dictionary containing all the tags associated with the Kinesis stream.
+ returned: when state == present.
+ type: dict
+ sample: {
+ "Name": "Splunk",
+ "Env": "development"
+ }
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+def get_tags(client, stream_name):
+ """Retrieve the tags for a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> get_tags(client, stream_name)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ try:
+ results = (
+ client.list_tags_for_stream(**params)['Tags']
+ )
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg, boto3_tag_list_to_ansible_dict(results)
+
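+# Illustrative note (not part of the module logic): the Kinesis API returns
+# stream tags as a list of {'Key': ..., 'Value': ...} dicts, which
+# boto3_tag_list_to_ansible_dict flattens into a plain mapping, e.g.
+#   boto3_tag_list_to_ansible_dict([{'Key': 'env', 'Value': 'dev'}])  # -> {'env': 'dev'}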
+
+def find_stream(client, stream_name):
+ """Retrieve a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): Name of the Kinesis stream.
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ err_msg = ''
+ success = False
+ params = {
+ 'StreamName': stream_name,
+ }
+ results = dict()
+ has_more_shards = True
+ shards = list()
+ try:
+ while has_more_shards:
+ results = (
+ client.describe_stream(**params)['StreamDescription']
+ )
+ shards.extend(results.pop('Shards'))
+ has_more_shards = results['HasMoreShards']
+ if has_more_shards:
+ params['ExclusiveStartShardId'] = shards[-1]['ShardId']
+ results['Shards'] = shards
+ num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
+ results['OpenShardsCount'] = len(shards) - num_closed_shards
+ results['ClosedShardsCount'] = num_closed_shards
+ results['ShardsCount'] = len(shards)
+ success = True
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg, results
+
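+# Pagination contract assumed by find_stream: describe_stream returns one page
+# of shards per call; while HasMoreShards is true, the next call resumes from
+# the last shard seen, e.g.
+#   client.describe_stream(StreamName='test-stream',
+#                          ExclusiveStartShardId='shardId-000000000009')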
+
+def wait_for_status(client, stream_name, status, wait_timeout=300,
+ check_mode=False):
+ """Wait for the status to change for a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client
+ stream_name (str): The name of the kinesis stream.
+ status (str): The status to wait for.
+ examples. status=ACTIVE, status=DELETING
+
+ Kwargs:
+ wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+ check_mode (bool): When true, skip polling and assume the status was reached.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> wait_for_status(client, stream_name, 'ACTIVE', 300)
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ polling_increment_secs = 5
+ wait_timeout = time.time() + wait_timeout
+ status_achieved = False
+ stream = dict()
+ err_msg = ""
+
+ while wait_timeout > time.time():
+ try:
+ find_success, find_msg, stream = (
+ find_stream(client, stream_name)
+ )
+ if check_mode:
+ status_achieved = True
+ break
+
+ elif status != 'DELETING':
+ if find_success and stream:
+ if stream.get('StreamStatus') == status:
+ status_achieved = True
+ break
+
+ else:
+ if not find_success:
+ status_achieved = True
+ break
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ time.sleep(polling_increment_secs)
+
+ if not status_achieved:
+ err_msg = "Wait time out reached, while waiting for results"
+ else:
+ err_msg = "Status {0} achieved successfully".format(status)
+
+ return status_achieved, err_msg, stream
+
+
+def tags_action(client, stream_name, tags, action='create', check_mode=False):
+ """Create or delete multiple tags from a Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the Kinesis stream.
+ tags: For action=create, a dict of tags to apply, for example
+ {'env': 'development'}; for action=delete, a list of tag keys to remove.
+
+ Kwargs:
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): When true, skip the API call and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> tags_action(client, stream_name, tags, action='create')
+ (True, '')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ""
+ params = {'StreamName': stream_name}
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['Tags'] = tags
+ client.add_tags_to_stream(**params)
+ success = True
+ elif action == 'delete':
+ params['TagKeys'] = tags
+ client.remove_tags_from_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
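+# Payload shapes assumed by tags_action (per the Kinesis API): adding takes a
+# tag dict, removing takes a list of tag keys, e.g.
+#   client.add_tags_to_stream(StreamName='test-stream', Tags={'env': 'dev'})
+#   client.remove_tags_from_stream(StreamName='test-stream', TagKeys=['env'])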
+
+def update_tags(client, stream_name, tags, check_mode=False):
+ """Update tags for an amazon resource.
+ Args:
+ resource_id (str): The Amazon resource id.
+ tags (dict): Dictionary of tags you want applied to the Kinesis stream.
+
+ Kwargs:
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> stream_name = 'test-stream'
+ >>> tags = {'env': 'development'}
+ >>> update_tags(client, stream_name, tags)
+ [True, '']
+
+ Return:
+ Tuple (bool, str)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name)
+ )
+
+ tags_to_set, tags_to_delete = compare_aws_tags(
+ current_tags, tags,
+ purge_tags=True,
+ )
+ if tags_to_delete:
+ delete_success, delete_msg = (
+ tags_action(
+ client, stream_name, tags_to_delete, action='delete',
+ check_mode=check_mode
+ )
+ )
+ if not delete_success:
+ return delete_success, changed, delete_msg
+ success = True
+ changed = True
+ err_msg = 'Tags removed'
+
+ if tags_to_set:
+ create_success, create_msg = (
+ tags_action(
+ client, stream_name, tags_to_set, action='create',
+ check_mode=check_mode
+ )
+ )
+ if create_success:
+ changed = True
+ return create_success, changed, create_msg
+
+ return success, changed, err_msg
+
+
+def stream_action(client, stream_name, shard_count=1, action='create',
+ timeout=300, check_mode=False):
+ """Create or Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ shard_count (int): Number of shards this stream will use.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ check_mode (bool): When true, skip the API call and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> shard_count = 20
+ >>> stream_action(client, stream_name, shard_count, action='create')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'create':
+ params['ShardCount'] = shard_count
+ client.create_stream(**params)
+ success = True
+ elif action == 'delete':
+ client.delete_stream(**params)
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'create':
+ success = True
+ elif action == 'delete':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
+
+def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
+ timeout=300, check_mode=False):
+ """Create, Encrypt or Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.EC2): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ shard_count (int): Number of shards this stream will use.
+ action (str): The action to perform.
+ valid actions == create and delete
+ default=create
+ encryption_type (str): NONE or KMS
+ key_id (str): The GUID or alias for the KMS key
+ check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> shard_count = 20
+ >>> stream_action(client, stream_name, shard_count, action='create', encryption_type='KMS',key_id='alias/aws')
+
+ Returns:
+ List (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'start_encryption':
+ params['EncryptionType'] = encryption_type
+ params['KeyId'] = key_id
+ client.start_stream_encryption(**params)
+ success = True
+ elif action == 'stop_encryption':
+ params['EncryptionType'] = encryption_type
+ params['KeyId'] = key_id
+ client.stop_stream_encryption(**params)
+ success = True
+ else:
+ err_msg = 'Invalid encryption action {0}'.format(action)
+ else:
+ if action == 'start_encryption':
+ success = True
+ elif action == 'stop_encryption':
+ success = True
+ else:
+ err_msg = 'Invalid encryption action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
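+# For reference, the underlying encryption calls used above:
+#   client.start_stream_encryption(StreamName='test-stream',
+#                                  EncryptionType='KMS', KeyId='alias/aws/kinesis')
+#   client.stop_stream_encryption(StreamName='test-stream',
+#                                 EncryptionType='KMS', KeyId='alias/aws/kinesis')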
+
+def retention_action(client, stream_name, retention_period=24,
+ action='increase', check_mode=False):
+ """Increase or Decrease the retention of messages in the Kinesis stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ action (str): The action to perform.
+ valid actions == increase and decrease
+ default=increase
+ check_mode (bool): When true, skip the API call and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> retention_action(client, stream_name, retention_period, action='increase')
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+ try:
+ if not check_mode:
+ if action == 'increase':
+ params['RetentionPeriodHours'] = retention_period
+ client.increase_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period increased successfully to {0}'.format(retention_period)
+ )
+ elif action == 'decrease':
+ params['RetentionPeriodHours'] = retention_period
+ client.decrease_stream_retention_period(**params)
+ success = True
+ err_msg = (
+ 'Retention Period decreased successfully to {0}'.format(retention_period)
+ )
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+ else:
+ if action == 'increase':
+ success = True
+ elif action == 'decrease':
+ success = True
+ else:
+ err_msg = 'Invalid action {0}'.format(action)
+
+ except botocore.exceptions.ClientError as e:
+ err_msg = to_native(e)
+
+ return success, err_msg
+
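+# Retention can only be moved in one direction per call, hence the two actions:
+#   client.increase_stream_retention_period(StreamName='test-stream', RetentionPeriodHours=48)
+#   client.decrease_stream_retention_period(StreamName='test-stream', RetentionPeriodHours=24)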
+
+def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False):
+ """Increase or Decrease the number of shards in the Kinesis stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ check_mode (bool): When true, skip the API call and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> number_of_shards = 3
+ >>> update_shard_count(client, stream_name, number_of_shards)
+
+ Returns:
+ Tuple (bool, str)
+ """
+ success = True
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name,
+ 'ScalingType': 'UNIFORM_SCALING'
+ }
+ if not check_mode:
+ params['TargetShardCount'] = number_of_shards
+ try:
+ client.update_shard_count(**params)
+ except botocore.exceptions.ClientError as e:
+ return False, str(e)
+
+ return success, err_msg
+
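+# Resharding sketch: UNIFORM_SCALING is the only scaling type the
+# UpdateShardCount API currently accepts, e.g.
+#   client.update_shard_count(StreamName='test-stream', TargetShardCount=4,
+#                             ScalingType='UNIFORM_SCALING')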
+
+def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Update an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ retention_period (int): This is how long messages will be kept before
+ they are discarded. This can not be less than 24 hours.
+ tags (dict): The tags you want applied.
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): When true, skip the API calls and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> current_stream = {
+ 'ShardCount': 3,
+ 'HasMoreShards': True,
+ 'RetentionPeriodHours': 24,
+ 'StreamName': 'test-stream',
+ 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
+ 'StreamStatus': "ACTIVE'
+ }
+ >>> stream_name = 'test-stream'
+ >>> retention_period = 48
+ >>> number_of_shards = 10
+ >>> update(client, current_stream, stream_name,
+ number_of_shards, retention_period )
+
+ Returns:
+ Tuple (bool, bool, str)
+ """
+ success = True
+ changed = False
+ err_msg = ''
+ if retention_period:
+ if wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+
+ if current_stream.get('StreamStatus') == 'ACTIVE':
+ retention_changed = False
+ if retention_period > current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period < current_stream['RetentionPeriodHours']:
+ retention_changed, retention_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='decrease',
+ check_mode=check_mode
+ )
+ )
+
+ elif retention_period == current_stream['RetentionPeriodHours']:
+ retention_msg = (
+ 'Retention {0} is the same as {1}'
+ .format(
+ retention_period,
+ current_stream['RetentionPeriodHours']
+ )
+ )
+ success = True
+
+ if retention_changed:
+ success = True
+ changed = True
+
+ err_msg = retention_msg
+ if changed and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, False, wait_msg
+ elif changed and not wait:
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if stream_found:
+ if current_stream['StreamStatus'] != 'ACTIVE':
+ err_msg = (
+ 'Retention Period for {0} is in the process of updating'
+ .format(stream_name)
+ )
+ return success, changed, err_msg
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ )
+ return success, changed, err_msg
+
+ if current_stream['OpenShardsCount'] != number_of_shards:
+ success, err_msg = (
+ update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
+ )
+
+ if not success:
+ return success, changed, err_msg
+
+ changed = True
+
+ if wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not wait_success:
+ return wait_success, changed, wait_msg
+ else:
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if stream_found and current_stream['StreamStatus'] != 'ACTIVE':
+ err_msg = (
+ 'Number of shards for {0} is in the process of updating'
+ .format(stream_name)
+ )
+ return success, changed, err_msg
+
+ if tags:
+ tag_success, tag_changed, err_msg = (
+ update_tags(client, stream_name, tags, check_mode=check_mode)
+ )
+ changed |= tag_changed
+ if wait:
+ success, err_msg, status_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if success and changed:
+ err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
+ elif success and not changed:
+ err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
+
+ return success, changed, err_msg
+
+
+def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
+ tags=None, wait=False, wait_timeout=300, check_mode=False):
+ """Create an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ number_of_shards (int): Number of shards this stream will use.
+ default=1
+ retention_period (int): Can not be less than 24 hours
+ default=None
+ tags (dict): The tags you want applied.
+ default=None
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): When true, skip the API calls and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> number_of_shards = 10
+ >>> tags = {'env': 'test'}
+ >>> create_stream(client, stream_name, number_of_shards, tags=tags)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+
+ if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait:
+ wait_success, wait_msg, current_stream = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+
+ if stream_found and current_stream.get('StreamStatus') != 'DELETING':
+ success, changed, err_msg = update(
+ client, current_stream, stream_name, number_of_shards,
+ retention_period, tags, wait, wait_timeout, check_mode=check_mode
+ )
+ else:
+ create_success, create_msg = (
+ stream_action(
+ client, stream_name, number_of_shards, action='create',
+ check_mode=check_mode
+ )
+ )
+ if not create_success:
+ changed = True
+ err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg)
+ return False, True, err_msg, {}
+ else:
+ changed = True
+ if wait:
+ wait_success, wait_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = (
+ 'Kinesis Stream {0} is in the process of being created'
+ .format(stream_name)
+ )
+ if not wait_success:
+ return wait_success, True, wait_msg, results
+ else:
+ err_msg = (
+ 'Kinesis Stream {0} created successfully'
+ .format(stream_name)
+ )
+
+ if tags:
+ changed, err_msg = (
+ tags_action(
+ client, stream_name, tags, action='create',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if retention_period and current_stream.get('StreamStatus') == 'ACTIVE':
+ changed, err_msg = (
+ retention_action(
+ client, stream_name, retention_period, action='increase',
+ check_mode=check_mode
+ )
+ )
+ if changed:
+ success = True
+ if not success:
+ return success, changed, err_msg, results
+ else:
+ err_msg = (
+ 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
+ .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ )
+ success = create_success
+ changed = True
+
+ if success:
+ stream_found, stream_msg, results = (
+ find_stream(client, stream_name)
+ )
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name)
+ )
+ if check_mode:
+ current_tags = tags
+
+ if not current_tags:
+ current_tags = dict()
+
+ results = camel_dict_to_snake_dict(results)
+ results['tags'] = current_tags
+
+ return success, changed, err_msg, results
+
+
+def delete_stream(client, stream_name, wait=False, wait_timeout=300,
+ check_mode=False):
+ """Delete an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ wait (bool): Wait until the Stream reaches the DELETING state.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): When true, skip the API calls and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> delete_stream(client, stream_name)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if stream_found:
+ success, err_msg = (
+ stream_action(
+ client, stream_name, action='delete', check_mode=check_mode
+ )
+ )
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'DELETING', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Stream {0} deleted successfully'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Stream {0} is in the process of being deleted'
+ .format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Stream {0} does not exist'.format(stream_name)
+
+ return success, changed, err_msg, results
+
+
+def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
+ wait=False, wait_timeout=300, check_mode=False):
+ """Start encryption on an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ encryption_type (str): KMS or NONE
+ key_id (str): KMS key GUID or alias
+ wait (bool): Wait until Stream is ACTIVE.
+ default=False
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): When true, skip the API calls and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> key_id = 'alias/aws'
+ >>> encryption_type = 'KMS'
+ >>> start_stream_encryption(client, stream_name,encryption_type,key_id)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if stream_found:
+ if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id):
+ changed = False
+ success = True
+ err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name)
+ else:
+ success, err_msg = (
+ stream_encryption_action(
+ client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode
+ )
+ )
+ if success:
+ changed = True
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
+ if not success:
+ return success, True, err_msg, results
+ else:
+ err_msg = (
+ 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
+ )
+ else:
+ success = True
+ changed = False
+ err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
+
+ if success:
+ stream_found, stream_msg, results = (
+ find_stream(client, stream_name)
+ )
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name)
+ )
+ if not current_tags:
+ current_tags = dict()
+
+ results = camel_dict_to_snake_dict(results)
+ results['tags'] = current_tags
+
+ return success, changed, err_msg, results
+
+
+def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
+ wait=True, wait_timeout=300, check_mode=False):
+ """Stop encryption on an Amazon Kinesis Stream.
+ Args:
+ client (botocore.client.Kinesis): Boto3 client.
+ stream_name (str): The name of the kinesis stream.
+
+ Kwargs:
+ encryption_type (str): KMS or NONE
+ key_id (str): KMS key GUID or alias
+ wait (bool): Wait until Stream is ACTIVE.
+ default=True
+ wait_timeout (int): How long to wait until this operation is considered failed.
+ default=300
+ check_mode (bool): When true, skip the API calls and report success.
+ default=False
+
+ Basic Usage:
+ >>> client = boto3.client('kinesis')
+ >>> stream_name = 'test-stream'
+ >>> stop_stream_encryption(client, stream_name,encryption_type, key_id)
+
+ Returns:
+ Tuple (bool, bool, str, dict)
+ """
+ success = False
+ changed = False
+ err_msg = ''
+ params = {
+ 'StreamName': stream_name
+ }
+
+ results = dict()
+ stream_found, stream_msg, current_stream = (
+ find_stream(client, stream_name)
+ )
+ if stream_found:
+ if current_stream.get('EncryptionType') == 'KMS':
+ success, err_msg = (
+ stream_encryption_action(
+ client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode
+ )
+ )
+ changed = success
+ if wait:
+ success, err_msg, results = (
+ wait_for_status(
+ client, stream_name, 'ACTIVE', wait_timeout,
+ check_mode=check_mode
+ )
+ )
+ if not success:
+ return success, True, err_msg, results
+ err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
+ else:
+ err_msg = (
+ 'Stream {0} is in the process of stopping encryption.'.format(stream_name)
+ )
+ elif current_stream.get('EncryptionType') == 'NONE':
+ success = True
+ err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name)
+ else:
+ success = True
+ changed = False
+ err_msg = 'Stream {0} does not exist.'.format(stream_name)
+
+ if success:
+ stream_found, stream_msg, results = (
+ find_stream(client, stream_name)
+ )
+ tag_success, tag_msg, current_tags = (
+ get_tags(client, stream_name)
+ )
+ if not current_tags:
+ current_tags = dict()
+
+ results = camel_dict_to_snake_dict(results)
+ results['tags'] = current_tags
+
+ return success, changed, err_msg, results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ shards=dict(default=None, required=False, type='int'),
+ retention_period=dict(default=None, required=False, type='int'),
+ tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
+ wait=dict(default=True, required=False, type='bool'),
+ wait_timeout=dict(default=300, required=False, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ encryption_type=dict(required=False, choices=['NONE', 'KMS']),
+ key_id=dict(required=False, type='str'),
+ encryption_state=dict(required=False, choices=['enabled', 'disabled']),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ retention_period = module.params.get('retention_period')
+ stream_name = module.params.get('name')
+ shards = module.params.get('shards')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ encryption_type = module.params.get('encryption_type')
+ key_id = module.params.get('key_id')
+ encryption_state = module.params.get('encryption_state')
+
+ if state == 'present' and not shards:
+ module.fail_json(msg='Shards is required when state == present.')
+
+ if retention_period:
+ if retention_period < 24:
+ module.fail_json(msg='Retention period can not be less than 24 hours.')
+
+ check_mode = module.check_mode
+ try:
+ client = module.client('kinesis')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ success, changed, err_msg, results = (
+ create_stream(
+ client, stream_name, shards, retention_period, tags,
+ wait, wait_timeout, check_mode
+ )
+ )
+ if encryption_state == 'enabled':
+ success, changed, err_msg, results = (
+ start_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
+ )
+ )
+ elif encryption_state == 'disabled':
+ success, changed, err_msg, results = (
+ stop_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
+ )
+ )
+ elif state == 'absent':
+ success, changed, err_msg, results = (
+ delete_stream(client, stream_name, wait, wait_timeout, check_mode)
+ )
+
+ if success:
+ module.exit_json(
+ success=success, changed=changed, msg=err_msg, **results
+ )
+ else:
+ module.fail_json(
+ success=success, changed=changed, msg=err_msg, result=results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/lightsail.py b/ansible_collections/community/aws/plugins/modules/lightsail.py
new file mode 100644
index 000000000..5e4035154
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/lightsail.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lightsail
+version_added: 1.0.0
+short_description: Manage instances in AWS Lightsail
+description:
+ - Manage instances in AWS Lightsail.
+ - Instance tagging is not yet supported in this module.
+author:
+ - "Nick Ball (@nickball)"
+ - "Prasad Katti (@prasadkatti)"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ - I(rebooted) and I(restarted) are aliases.
+ default: present
+ choices: ['present', 'absent', 'running', 'restarted', 'rebooted', 'stopped']
+ type: str
+ name:
+ description: Name of the instance.
+ required: true
+ type: str
+ zone:
+ description:
+ - AWS availability zone in which to launch the instance.
+ - Required when I(state=present).
+ type: str
+ blueprint_id:
+ description:
+ - ID of the instance blueprint image.
+ - Required when I(state=present).
+ type: str
+ bundle_id:
+ description:
+ - Bundle of specification info for the instance.
+ - Required when I(state=present).
+ type: str
+ user_data:
+ description:
+ - Launch script that can configure the instance with additional data.
+ type: str
+ default: ''
+ key_pair_name:
+ description:
+ - Name of the key pair to use with the instance.
+ - If I(state=present) and a key_pair_name is not provided, the default keypair from the region will be used.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ - If I(wait=false) an ip_address may not be returned.
+ - Has no effect when I(state=rebooted) or I(state=absent).
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - How long before I(wait) gives up, in seconds.
+ default: 300
+ type: int
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+
+EXAMPLES = '''
+- name: Create a new Lightsail instance
+ community.aws.lightsail:
+ state: present
+ name: my_instance
+ region: us-east-1
+ zone: us-east-1a
+ blueprint_id: ubuntu_16_04
+ bundle_id: nano_1_0
+ key_pair_name: id_rsa
+ user_data: " echo 'hello world' > /home/ubuntu/test.txt"
+ register: my_instance
+
+- name: Delete an instance
+ community.aws.lightsail:
+ state: absent
+ region: us-east-1
+ name: my_instance
+
+'''
+
+RETURN = '''
+changed:
+ description: if the instance has been modified/created
+ returned: always
+ type: bool
+ sample:
+ changed: true
+instance:
+ description: instance data
+ returned: always
+ type: dict
+ sample:
+ arn: "arn:aws:lightsail:us-east-1:123456789012:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
+ blueprint_id: "ubuntu_16_04"
+ blueprint_name: "Ubuntu"
+ bundle_id: "nano_1_0"
+ created_at: "2017-03-27T08:38:59.714000-04:00"
+ hardware:
+ cpu_count: 1
+ ram_size_in_gb: 0.5
+ is_static_ip: false
+ location:
+ availability_zone: "us-east-1a"
+ region_name: "us-east-1"
+ name: "my_instance"
+ networking:
+ monthly_transfer:
+ gb_per_month_allocated: 1024
+ ports:
+ - access_direction: "inbound"
+ access_from: "Anywhere (0.0.0.0/0)"
+ access_type: "public"
+ common_name: ""
+ from_port: 80
+ protocol: tcp
+ to_port: 80
+ - access_direction: "inbound"
+ access_from: "Anywhere (0.0.0.0/0)"
+ access_type: "public"
+ common_name: ""
+ from_port: 22
+ protocol: tcp
+ to_port: 22
+ private_ip_address: "172.26.8.14"
+ public_ip_address: "34.207.152.202"
+ resource_type: "Instance"
+ ssh_key_name: "keypair"
+ state:
+ code: 16
+ name: running
+ support_code: "123456789012/i-0997c97831ee21e33"
+ username: "ubuntu"
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ # will be caught by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def find_instance_info(module, client, instance_name, fail_if_not_found=False):
+
+ try:
+ res = client.get_instance(instanceName=instance_name)
+ except is_boto3_error_code('NotFoundException') as e:
+ if fail_if_not_found:
+ module.fail_json_aws(e)
+ return None
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ return res['instance']
+
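+# is_boto3_error_code narrows an except clause to the named AWS error code
+# ('NotFoundException' above); any other ClientError falls through to the
+# generic duplicate-except handler.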
+
+def wait_for_instance_state(module, client, instance_name, states):
+ """
+ `states` is a list of instance states that we are waiting for.
+ """
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_max = time.time() + wait_timeout
+ while wait_max > time.time():
+ try:
+ instance = find_instance_info(module, client, instance_name)
+ if instance['state']['name'] in states:
+ break
+ time.sleep(5)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
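+ # while/else: the else branch runs only if the loop above finished without
+ # hitting the break, i.e. the timeout expired before a desired state was seen.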
+ else:
+ module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -'
+ ' {1}'.format(instance_name, states))
+
+
+def create_instance(module, client, instance_name):
+
+ inst = find_instance_info(module, client, instance_name)
+ if inst:
+ module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst))
+ else:
+ create_params = {'instanceNames': [instance_name],
+ 'availabilityZone': module.params.get('zone'),
+ 'blueprintId': module.params.get('blueprint_id'),
+ 'bundleId': module.params.get('bundle_id'),
+ 'userData': module.params.get('user_data')}
+
+ key_pair_name = module.params.get('key_pair_name')
+ if key_pair_name:
+ create_params['keyPairName'] = key_pair_name
+
+ try:
+ client.create_instances(**create_params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ wait = module.params.get('wait')
+ if wait:
+ desired_states = ['running']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst))
+
+
+def delete_instance(module, client, instance_name):
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name)
+ if inst is None:
+ module.exit_json(changed=changed, instance={})
+
+ # Wait for instance to exit transition state before deleting
+ desired_states = ['running', 'stopped']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+
+ try:
+ client.delete_instance(instanceName=instance_name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def restart_instance(module, client, instance_name):
+ """
+ Reboot an existing instance
+ Wait will not apply here as this is an OS-level operation
+ """
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ try:
+ client.reboot_instance(instanceName=instance_name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def start_or_stop_instance(module, client, instance_name, state):
+ """
+ Start or stop an existing instance
+ """
+
+ changed = False
+
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ # Wait for instance to exit transition state before state change
+ desired_states = ['running', 'stopped']
+ wait_for_instance_state(module, client, instance_name, desired_states)
+
+ # Try state change
+ if inst and inst['state']['name'] != state:
+ try:
+ if state == 'running':
+ client.start_instance(instanceName=instance_name)
+ else:
+ client.stop_instance(instanceName=instance_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ changed = True
+ # Grab current instance info
+ inst = find_instance_info(module, client, instance_name)
+
+ wait = module.params.get('wait')
+ if wait:
+ desired_states = [state]
+ wait_for_instance_state(module, client, instance_name, desired_states)
+ inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+
+ module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted',
+ 'rebooted']),
+ zone=dict(type='str'),
+ blueprint_id=dict(type='str'),
+ bundle_id=dict(type='str'),
+ key_pair_name=dict(type='str'),
+ user_data=dict(type='str', default=''),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=300, type='int'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]])
+
+ client = module.client('lightsail')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_instance(module, client, name)
+ elif state == 'absent':
+ delete_instance(module, client, name)
+ elif state in ('running', 'stopped'):
+ start_or_stop_instance(module, client, name, state)
+ elif state in ('restarted', 'rebooted'):
+ restart_instance(module, client, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py
new file mode 100644
index 000000000..799ff629d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lightsail_static_ip
+version_added: 4.1.0
+short_description: Manage static IP addresses in AWS Lightsail
+description:
+ - Manage static IP addresses in AWS Lightsail.
+author:
+ - "Daniel Cotton (@danielcotton)"
+options:
+ state:
+ description:
+ - Describes the desired state.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ name:
+ description: Name of the static IP.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+
+EXAMPLES = '''
+- name: Provision a Lightsail static IP
+ community.aws.lightsail_static_ip:
+ state: present
+ name: my_static_ip
+ register: my_ip
+
+- name: Remove a static IP
+ community.aws.lightsail_static_ip:
+ state: absent
+ name: my_static_ip
+'''
+
+RETURN = '''
+static_ip:
+ description: Static IP instance data.
+ returned: always
+ type: dict
+ sample:
+ arn: "arn:aws:lightsail:ap-southeast-2:123456789012:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a"
+ created_at: "2021-02-28T00:04:05.202000+10:30"
+ ip_address: "192.0.2.5"
+ is_attached: false
+ location:
+ availability_zone: all
+ region_name: ap-southeast-2
+ name: "static_ip"
+ resource_type: StaticIp
+ support_code: "123456789012/192.0.2.5"
+'''
+
+try:
+ import botocore
+except ImportError:
+ # will be caught by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False):
+
+ try:
+ res = client.get_static_ip(staticIpName=static_ip_name)
+ except is_boto3_error_code('NotFoundException') as e:
+ if fail_if_not_found:
+ module.fail_json_aws(e)
+ return None
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ return res['staticIp']
+
+
+def create_static_ip(module, client, static_ip_name):
+
+ inst = find_static_ip_info(module, client, static_ip_name)
+ if inst:
+ module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst))
+ else:
+ create_params = {'staticIpName': static_ip_name}
+
+ try:
+ client.allocate_static_ip(**create_params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ inst = find_static_ip_info(module, client, static_ip_name, fail_if_not_found=True)
+
+ module.exit_json(changed=True, static_ip=camel_dict_to_snake_dict(inst))
+
+
+def delete_static_ip(module, client, static_ip_name):
+
+ inst = find_static_ip_info(module, client, static_ip_name)
+ if inst is None:
+ module.exit_json(changed=False, static_ip={})
+
+ changed = False
+ try:
+ client.release_static_ip(staticIpName=static_ip_name)
+ changed = True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed, static_ip=camel_dict_to_snake_dict(inst))
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ client = module.client('lightsail')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_static_ip(module, client, name)
+ elif state == 'absent':
+ delete_static_ip(module, client, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/msk_cluster.py b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
new file mode 100644
index 000000000..75c7fa829
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
@@ -0,0 +1,848 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: msk_cluster
+short_description: Manage Amazon MSK clusters
+version_added: "2.0.0"
+description:
+ - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_cluster).
+ The usage did not change.
+author:
+ - Daniil Kupchenko (@oukooveu)
+options:
+ state:
+ description: Create (C(present)) or delete (C(absent)) cluster.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ name:
+ description: The name of the cluster.
+ required: true
+ type: str
+ version:
+ description:
+ - The version of Apache Kafka.
+ - This version should exist in given configuration.
+ - This parameter is required when I(state=present).
+ type: str
+ configuration_arn:
+ description:
+ - ARN of the configuration to use.
+ - This parameter is required when I(state=present).
+ type: str
+ configuration_revision:
+ description:
+ - The revision of the configuration to use.
+ - This parameter is required when I(state=present).
+ type: int
+ nodes:
+ description: The number of broker nodes in the cluster. Must be greater than or equal to two.
+ type: int
+ default: 3
+ instance_type:
+ description:
+ - The type of Amazon EC2 instances to use for Kafka brokers.
+ choices:
+ - kafka.t3.small
+ - kafka.m5.large
+ - kafka.m5.xlarge
+ - kafka.m5.2xlarge
+ - kafka.m5.4xlarge
+ default: kafka.t3.small
+ type: str
+ ebs_volume_size:
+ description: The size in GiB of the EBS volume for the data drive on each broker node.
+ type: int
+ default: 100
+ subnets:
+ description:
+ - The list of subnets to connect to in the client virtual private cloud (VPC).
+ AWS creates elastic network interfaces inside these subnets. Client applications use
+ elastic network interfaces to produce and consume data.
+ - Client subnets can't be in Availability Zone us-east-1e.
+ - This parameter is required when I(state=present).
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - The AWS security groups to associate with the elastic network interfaces in order to specify
+ who can connect to and communicate with the Amazon MSK cluster.
+ If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC.
+ type: list
+ elements: str
+ encryption:
+ description:
+ - Includes all encryption-related information.
+ - Effective only for new clusters and cannot be updated.
+ type: dict
+ suboptions:
+ kms_key_id:
+ description:
+ - The ARN of the AWS KMS key for encrypting data at rest. If you don't specify a KMS key, MSK creates one for you and uses it.
+ default: Null
+ type: str
+ in_transit:
+ description: The details for encryption in transit.
+ type: dict
+ suboptions:
+ in_cluster:
+ description:
+ - When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted.
+ When set to false, the communication happens in plaintext.
+ type: bool
+ default: True
+ client_broker:
+ description:
+ - Indicates the encryption setting for data in transit between clients and brokers. The following are the possible values.
+ TLS means that client-broker communication is enabled with TLS only.
+ TLS_PLAINTEXT means that client-broker communication is enabled for both TLS-encrypted, as well as plaintext data.
+ PLAINTEXT means that client-broker communication is enabled in plaintext only.
+ choices:
+ - TLS
+ - TLS_PLAINTEXT
+ - PLAINTEXT
+ type: str
+ default: TLS
+ authentication:
+ description:
+ - Includes all client authentication related information.
+ - Effective only for new clusters and cannot be updated.
+ type: dict
+ suboptions:
+ tls_ca_arn:
+ description: List of ACM Certificate Authority ARNs.
+ type: list
+ elements: str
+ sasl_scram:
+ description: SASL/SCRAM authentication is enabled or not.
+ type: bool
+ sasl_iam:
+ version_added: 5.5.0
+ description: IAM authentication is enabled or not.
+ type: bool
+ unauthenticated:
+ version_added: 5.5.0
+ description: Option to explicitly turn authentication on or off.
+ type: bool
+ default: True
+ enhanced_monitoring:
+ description: Specifies the level of monitoring for the MSK cluster.
+ choices:
+ - DEFAULT
+ - PER_BROKER
+ - PER_TOPIC_PER_BROKER
+ - PER_TOPIC_PER_PARTITION
+ default: DEFAULT
+ type: str
+ open_monitoring:
+ description: The settings for open monitoring.
+ type: dict
+ suboptions:
+ jmx_exporter:
+ description: Indicates whether you want to enable or disable the JMX Exporter.
+ type: bool
+ default: False
+ node_exporter:
+ description: Indicates whether you want to enable or disable the Node Exporter.
+ type: bool
+ default: False
+ logging:
+ description: Logging configuration.
+ type: dict
+ suboptions:
+ cloudwatch:
+ description: Details of the CloudWatch Logs destination for broker logs.
+ type: dict
+ suboptions:
+ enabled:
+ description: Specifies whether broker logs get sent to the specified CloudWatch Logs destination.
+ type: bool
+ default: False
+ log_group:
+ description: The CloudWatch log group that is the destination for broker logs.
+ type: str
+ required: False
+ firehose:
+ description: Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs.
+ type: dict
+ suboptions:
+ enabled:
+ description: Specifies whether broker logs get sent to the specified Kinesis Data Firehose delivery stream.
+ type: bool
+ default: False
+ delivery_stream:
+ description: The Kinesis Data Firehose delivery stream that is the destination for broker logs.
+ type: str
+ required: False
+ s3:
+ description: Details of the Amazon S3 destination for broker logs.
+ type: dict
+ suboptions:
+ enabled:
+ description: Specifies whether broker logs get sent to the specified Amazon S3 destination.
+ type: bool
+ default: False
+ bucket:
+ description: The name of the S3 bucket that is the destination for broker logs.
+ type: str
+ required: False
+ prefix:
+ description: The S3 prefix that is the destination for broker logs.
+ type: str
+ required: False
+ wait:
+ description: Whether to wait for the cluster to be available or deleted.
+ type: bool
+ default: false
+ wait_timeout:
+ description: How many seconds to wait. Cluster creation alone can take 20-30 minutes.
+ type: int
+ default: 3600
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+notes:
+ - All operations are time consuming; for example, cluster creation takes 20-30 minutes,
+ a Kafka version update can take more than one hour, and a configuration update takes 10-15 minutes.
+ - The cluster's brokers are distributed evenly across a number of availability zones
+ equal to the number of subnets.
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.msk_cluster:
+ name: kafka-cluster
+ state: present
+ version: 2.6.1
+ nodes: 6
+ ebs_volume_size: "{{ aws_msk_options.ebs_volume_size }}"
+ subnets:
+ - subnet-e3b48ce7c25861eeb
+ - subnet-2990c8b25b07ddd43
+ - subnet-d9fbeaf46c54bfab6
+ wait: true
+ wait_timeout: 1800
+ configuration_arn: arn:aws:kafka:us-east-1:123456789012:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1
+ configuration_revision: 1
+
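+# A fuller creation example exercising the encryption, authentication and logging
+# options. A minimal sketch: the security group ID, configuration ARN, KMS key ARN
+# and log group name below are illustrative placeholders, not real resources.
+- community.aws.msk_cluster:
+ name: kafka-cluster-tls
+ state: present
+ version: 2.6.1
+ nodes: 3
+ subnets:
+ - subnet-e3b48ce7c25861eeb
+ - subnet-2990c8b25b07ddd43
+ - subnet-d9fbeaf46c54bfab6
+ security_groups:
+ - sg-0123456789abcdef0
+ configuration_arn: arn:aws:kafka:us-east-1:123456789012:configuration/example-config/11111111-2222-3333-4444-555555555555-1
+ configuration_revision: 1
+ encryption:
+ kms_key_id: arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000
+ in_transit:
+ client_broker: TLS
+ in_cluster: true
+ authentication:
+ sasl_iam: true
+ unauthenticated: false
+ logging:
+ cloudwatch:
+ enabled: true
+ log_group: msk-broker-logs
+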
+- community.aws.msk_cluster:
+ name: kafka-cluster
+ state: absent
+"""
+
+RETURN = r"""
+# These are examples of possible return values, and in general should use other names for return values.
+
+bootstrap_broker_string:
+ description: A list of brokers that a client application can use to bootstrap.
+ type: complex
+ contains:
+ plain:
+ description: A string containing one or more hostname:port pairs.
+ type: str
+ tls:
+ description: A string containing one or more DNS names (or IP) and TLS port pairs.
+ type: str
+ returned: I(state=present) and cluster state is I(ACTIVE)
+cluster_info:
+ description: Description of the MSK cluster.
+ type: dict
+ returned: I(state=present)
+response:
+ description: The response from the actual API call.
+ type: dict
+ returned: always
+ sample: {}
+"""
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ camel_dict_to_snake_dict,
+ compare_aws_tags,
+ AWSRetry,
+)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5)
+def list_clusters_with_backoff(client, cluster_name):
+ paginator = client.get_paginator("list_clusters")
+ return paginator.paginate(ClusterNameFilter=cluster_name).build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5)
+def list_nodes_with_backoff(client, cluster_arn):
+ paginator = client.get_paginator("list_nodes")
+ return paginator.paginate(ClusterArn=cluster_arn).build_full_result()
+
+
+def find_cluster_by_name(client, module, cluster_name):
+ try:
+ cluster_list = list_clusters_with_backoff(client, cluster_name).get("ClusterInfoList", [])
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "Failed to find kafka cluster by name")
+ if cluster_list:
+ if len(cluster_list) != 1:
+ module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name))
+ return cluster_list[0]
+ return {}
+
+
+def get_cluster_state(client, module, arn):
+ try:
+ response = client.describe_cluster(ClusterArn=arn, aws_retry=True)
+ except client.exceptions.NotFoundException:
+ return "DELETED"
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "Failed to get kafka cluster state")
+ return response["ClusterInfo"]["State"]
+
+
+def get_cluster_version(client, module, arn):
+ try:
+ response = client.describe_cluster(ClusterArn=arn, aws_retry=True)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "Failed to get kafka cluster version")
+ return response["ClusterInfo"]["CurrentVersion"]
+
+
+def wait_for_cluster_state(client, module, arn, state="ACTIVE"):
+ # As of 2021-06 boto3 doesn't offer any built in waiters
+ start = time.time()
+ timeout = int(module.params.get("wait_timeout"))
+ check_interval = 60
+ while True:
+ current_state = get_cluster_state(client, module, arn)
+ if current_state == state:
+ return
+ if time.time() - start > timeout:
+ module.fail_json(
+ msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(
+ current_state, state
+ )
+ )
+ time.sleep(check_interval)
+
+
+def prepare_create_options(module):
+ """
+ Return data structure for cluster create operation
+ """
+
+ c_params = {
+ "ClusterName": module.params["name"],
+ "KafkaVersion": module.params["version"],
+ "ConfigurationInfo": {
+ "Arn": module.params["configuration_arn"],
+ "Revision": module.params["configuration_revision"],
+ },
+ "NumberOfBrokerNodes": module.params["nodes"],
+ "BrokerNodeGroupInfo": {
+ "ClientSubnets": module.params["subnets"],
+ "InstanceType": module.params["instance_type"],
+ }
+ }
+
+ if module.params["security_groups"] and len(module.params["security_groups"]) != 0:
+ c_params["BrokerNodeGroupInfo"]["SecurityGroups"] = module.params.get("security_groups")
+
+ if module.params["ebs_volume_size"]:
+ c_params["BrokerNodeGroupInfo"]["StorageInfo"] = {
+ "EbsStorageInfo": {
+ "VolumeSize": module.params.get("ebs_volume_size")
+ }
+ }
+
+ if module.params["encryption"]:
+ c_params["EncryptionInfo"] = {}
+ if module.params["encryption"].get("kms_key_id"):
+ c_params["EncryptionInfo"]["EncryptionAtRest"] = {
+ "DataVolumeKMSKeyId": module.params["encryption"]["kms_key_id"]
+ }
+ c_params["EncryptionInfo"]["EncryptionInTransit"] = {
+ "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"),
+ "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True)
+ }
+
+ if module.params["authentication"]:
+ c_params["ClientAuthentication"] = {}
+ if module.params["authentication"].get("sasl_scram") or module.params["authentication"].get("sasl_iam"):
+ sasl = {}
+ if module.params["authentication"].get("sasl_scram"):
+ sasl["Scram"] = {"Enabled": True}
+ if module.params["authentication"].get("sasl_iam"):
+ sasl["Iam"] = {"Enabled": True}
+ c_params["ClientAuthentication"]["Sasl"] = sasl
+ if module.params["authentication"].get("tls_ca_arn"):
+ c_params["ClientAuthentication"]["Tls"] = {
+ "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"],
+ "Enabled": True,
+ }
+ if module.params["authentication"].get("unauthenticated"):
+ c_params["ClientAuthentication"] = {
+ "Unauthenticated": {"Enabled": True},
+ }
+
+ c_params.update(prepare_enhanced_monitoring_options(module))
+ c_params.update(prepare_open_monitoring_options(module))
+ c_params.update(prepare_logging_options(module))
+
+ return c_params
+
+
+def prepare_enhanced_monitoring_options(module):
+ m_params = {}
+ m_params["EnhancedMonitoring"] = module.params["enhanced_monitoring"] or "DEFAULT"
+ return m_params
+
+
+def prepare_open_monitoring_options(module):
+ m_params = {}
+ open_monitoring = module.params["open_monitoring"] or {}
+ m_params["OpenMonitoring"] = {
+ "Prometheus": {
+ "JmxExporter": {
+ "EnabledInBroker": open_monitoring.get("jmx_exporter", False)
+ },
+ "NodeExporter": {
+ "EnabledInBroker": open_monitoring.get("node_exporter", False)
+ }
+ }
+ }
+ return m_params
+
+
+def prepare_logging_options(module):
+ l_params = {}
+ logging = module.params["logging"] or {}
+ if logging.get("cloudwatch"):
+ l_params["CloudWatchLogs"] = {
+ "Enabled": module.params["logging"]["cloudwatch"].get("enabled"),
+ "LogGroup": module.params["logging"]["cloudwatch"].get("log_group")
+ }
+ else:
+ l_params["CloudWatchLogs"] = {
+ "Enabled": False
+ }
+ if logging.get("firehose"):
+ l_params["Firehose"] = {
+ "Enabled": module.params["logging"]["firehose"].get("enabled"),
+ "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream")
+ }
+ else:
+ l_params["Firehose"] = {
+ "Enabled": False
+ }
+ if logging.get("s3"):
+ l_params["S3"] = {
+ "Enabled": module.params["logging"]["s3"].get("enabled"),
+ "Bucket": module.params["logging"]["s3"].get("bucket"),
+ "Prefix": module.params["logging"]["s3"].get("prefix")
+ }
+ else:
+ l_params["S3"] = {
+ "Enabled": False
+ }
+ return {
+ "LoggingInfo": {
+ "BrokerLogs": l_params
+ }
+ }
+
+
+def create_or_update_cluster(client, module):
+ """
+ Create new or update existing cluster
+ """
+
+ changed = False
+ response = {}
+
+ cluster = find_cluster_by_name(client, module, module.params["name"])
+
+ if not cluster:
+
+ changed = True
+
+ if module.check_mode:
+ return True, {}
+
+ create_params = prepare_create_options(module)
+
+ try:
+ response = client.create_cluster(aws_retry=True, **create_params)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "Failed to create kafka cluster")
+
+ if module.params.get("wait"):
+ wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE")
+
+ else:
+
+ response["ClusterArn"] = cluster["ClusterArn"]
+ response["changes"] = {}
+
+ # prepare available update methods definitions with current/target values and options
+ msk_cluster_changes = {
+ "broker_count": {
+ "current_value": cluster["NumberOfBrokerNodes"],
+ "target_value": module.params.get("nodes"),
+ "update_params": {
+ "TargetNumberOfBrokerNodes": module.params.get("nodes")
+ }
+ },
+ "broker_storage": {
+ "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"],
+ "target_value": module.params.get("ebs_volume_size"),
+ "update_params": {
+ "TargetBrokerEBSVolumeInfo": [
+ {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")}
+ ]
+ }
+ },
+ "broker_type": {
+ "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"],
+ "target_value": module.params.get("instance_type"),
+ "update_params": {
+ "TargetInstanceType": module.params.get("instance_type")
+ }
+ },
+ "cluster_configuration": {
+ "current_value": {
+ "arn": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationArn"],
+ "revision": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationRevision"],
+ },
+ "target_value": {
+ "arn": module.params.get("configuration_arn"),
+ "revision": module.params.get("configuration_revision"),
+ },
+ "update_params": {
+ "ConfigurationInfo": {
+ "Arn": module.params.get("configuration_arn"),
+ "Revision": module.params.get("configuration_revision")
+ }
+ }
+ },
+ "cluster_kafka_version": {
+ "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"],
+ "target_value": module.params.get("version"),
+ "update_params": {
+ "TargetKafkaVersion": module.params.get("version")
+ }
+ },
+ "enhanced_monitoring": {
+ "current_value": cluster["EnhancedMonitoring"],
+ "target_value": module.params.get("enhanced_monitoring"),
+ "update_method": "update_monitoring",
+ "update_params": prepare_enhanced_monitoring_options(module)
+ },
+ "open_monitoring": {
+ "current_value": {
+ "OpenMonitoring": cluster["OpenMonitoring"]
+ },
+ "target_value": prepare_open_monitoring_options(module),
+ "update_method": "update_monitoring",
+ "update_params": prepare_open_monitoring_options(module)
+ },
+ "logging": {
+ "current_value": {
+ "LoggingInfo": cluster["LoggingInfo"]
+ },
+ "target_value": prepare_logging_options(module),
+ "update_method": "update_monitoring",
+ "update_params": prepare_logging_options(module)
+ }
+ }
+
+ for method, options in msk_cluster_changes.items():
+
+ if 'botocore_version' in options:
+ if not module.botocore_at_least(options["botocore_version"]):
+ continue
+
+ try:
+ update_method = getattr(client, options.get("update_method", "update_" + method))
+ except AttributeError as e:
+ module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method))
+
+ if options["current_value"] != options["target_value"]:
+ changed = True
+ if module.check_mode:
+ return True, {}
+
+ # need to get cluster version and check for the state because
+ # there can be several updates requested but only one in time can be performed
+ version = get_cluster_version(client, module, cluster["ClusterArn"])
+ state = get_cluster_state(client, module, cluster["ClusterArn"])
+ if state != "ACTIVE":
+ if module.params["wait"]:
+ wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
+ else:
+ module.fail_json(
+ msg="Cluster can be updated only in active state, current state is '{0}'. check cluster state or use wait option".format(
+ state
+ )
+ )
+ try:
+ response["changes"][method] = update_method(
+ ClusterArn=cluster["ClusterArn"],
+ CurrentVersion=version,
+ **options["update_params"]
+ )
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(
+ e, "Failed to update cluster via 'update_{0}'".format(method)
+ )
+
+ if module.params["wait"]:
+ wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
+
+ changed |= update_cluster_tags(client, module, response["ClusterArn"])
+
+ return changed, response
+
+
+def update_cluster_tags(client, module, arn):
+ new_tags = module.params.get('tags')
+ if new_tags is None:
+ return False
+ purge_tags = module.params.get('purge_tags')
+
+ try:
+ existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn))
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ try:
+ if tags_to_remove:
+ client.untag_resource(ResourceArn=arn, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+ client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn))
+
+ changed = bool(tags_to_add) or bool(tags_to_remove)
+ return changed
+
+
+def delete_cluster(client, module):
+
+ cluster = find_cluster_by_name(client, module, module.params["name"])
+
+ if module.check_mode:
+ if cluster:
+ return True, cluster
+ else:
+ return False, {}
+
+ if not cluster:
+ return False, {}
+
+ try:
+ response = client.delete_cluster(
+ ClusterArn=cluster["ClusterArn"],
+ CurrentVersion=cluster["CurrentVersion"],
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, "Failed to delete kafka cluster")
+
+ if module.params["wait"]:
+ wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="DELETED")
+
+ response["bootstrap_broker_string"] = {}
+
+ return True, response
+
+
+def main():
+
+ module_args = dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", choices=["present", "absent"], default="present"),
+ version=dict(type="str"),
+ configuration_arn=dict(type="str"),
+ configuration_revision=dict(type="int"),
+ nodes=dict(type="int", default=3),
+ instance_type=dict(
+ choices=[
+ "kafka.t3.small",
+ "kafka.m5.large",
+ "kafka.m5.xlarge",
+ "kafka.m5.2xlarge",
+ "kafka.m5.4xlarge",
+ ],
+ default="kafka.t3.small",
+ ),
+ ebs_volume_size=dict(type="int", default=100),
+ subnets=dict(type="list", elements="str"),
+ security_groups=dict(type="list", elements="str", required=False),
+ encryption=dict(
+ type="dict",
+ options=dict(
+ kms_key_id=dict(type="str", required=False),
+ in_transit=dict(
+ type="dict",
+ options=dict(
+ in_cluster=dict(type="bool", default=True),
+ client_broker=dict(
+ choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"],
+ default="TLS"
+ ),
+ ),
+ ),
+ ),
+ ),
+ authentication=dict(
+ type="dict",
+ options=dict(
+ tls_ca_arn=dict(type="list", elements="str", required=False),
+ sasl_scram=dict(type="bool", required=False),
+ sasl_iam=dict(type="bool", required=False),
+ unauthenticated=dict(type="bool", default=True, required=False),
+ ),
+ ),
+ enhanced_monitoring=dict(
+ choices=[
+ "DEFAULT",
+ "PER_BROKER",
+ "PER_TOPIC_PER_BROKER",
+ "PER_TOPIC_PER_PARTITION",
+ ],
+ default="DEFAULT",
+ required=False,
+ ),
+ open_monitoring=dict(
+ type="dict",
+ options=dict(
+ jmx_exporter=dict(type="bool", default=False),
+ node_exporter=dict(type="bool", default=False),
+ ),
+ ),
+ logging=dict(
+ type="dict",
+ options=dict(
+ cloudwatch=dict(
+ type="dict",
+ options=dict(
+ enabled=dict(type="bool", default=False),
+ log_group=dict(type="str", required=False),
+ ),
+ ),
+ firehose=dict(
+ type="dict",
+ options=dict(
+ enabled=dict(type="bool", default=False),
+ delivery_stream=dict(type="str", required=False),
+ ),
+ ),
+ s3=dict(
+ type="dict",
+ options=dict(
+ enabled=dict(type="bool", default=False),
+ bucket=dict(type="str", required=False),
+ prefix=dict(type="str", required=False),
+ ),
+ ),
+ ),
+ ),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=3600),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]],
+ supports_check_mode=True
+ )
+
+ client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())
+
+ if module.params["state"] == "present":
+ if len(module.params["subnets"]) < 2:
+ module.fail_json(
+ msg="At least two client subnets should be provided"
+ )
+ if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0:
+ module.fail_json(
+ msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter"
+ )
+ if len(module.params["name"]) > 64:
+ module.fail_json(msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"]))
+ changed, response = create_or_update_cluster(client, module)
+ elif module.params["state"] == "absent":
+ changed, response = delete_cluster(client, module)
+
+ cluster_info = {}
+ bootstrap_broker_string = {}
+ if response.get("ClusterArn") and module.params["state"] == "present":
+ try:
+ cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[
+ "ClusterInfo"
+ ]
+ if cluster_info.get("State") == "ACTIVE":
+ brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True)
+ if brokers.get("BootstrapBrokerString"):
+ bootstrap_broker_string["plain"] = brokers["BootstrapBrokerString"]
+ if brokers.get("BootstrapBrokerStringTls"):
+ bootstrap_broker_string["tls"] = brokers["BootstrapBrokerStringTls"]
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(
+ e,
+ "Can not obtain information about cluster {0}".format(
+ response["ClusterArn"]
+ ),
+ )
+
+ module.exit_json(
+ changed=changed,
+ bootstrap_broker_string=bootstrap_broker_string,
+ cluster_info=camel_dict_to_snake_dict(cluster_info),
+ response=camel_dict_to_snake_dict(response),
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/msk_config.py b/ansible_collections/community/aws/plugins/modules/msk_config.py
new file mode 100644
index 000000000..812eba16d
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/msk_config.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: msk_config
+short_description: Manage Amazon MSK cluster configurations
+version_added: "2.0.0"
+description:
+ - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_config).
+ The usage did not change.
+author:
+ - Daniil Kupchenko (@oukooveu)
+options:
+ state:
+ description: Create (C(present)) or delete (C(absent)) cluster configuration.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description: The name of the configuration.
+ required: true
+ type: str
+ description:
+ description: The description of the configuration.
+ type: str
+ default: ''
+ config:
+ description: Contents of the server.properties file.
+ type: dict
+ default: {}
+ aliases: ['configuration']
+ kafka_versions:
+ description:
+ - The versions of Apache Kafka with which you can use this MSK configuration.
+ - Required when I(state=present).
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- community.aws.msk_config:
+ name: kafka-cluster-configuration
+ state: present
+ kafka_versions:
+ - 2.6.0
+ - 2.6.1
+ config:
+ auto.create.topics.enable: false
+ num.partitions: 1
+ default.replication.factor: 3
+ zookeeper.session.timeout.ms: 18000
+
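+# The ARN and revision returned by this module can feed community.aws.msk_cluster,
+# which requires them when state=present. A minimal sketch; the registered variable
+# name is illustrative.
+- community.aws.msk_config:
+ name: kafka-cluster-configuration
+ state: present
+ kafka_versions:
+ - 2.6.1
+ config:
+ auto.create.topics.enable: false
+ register: msk_config
+
+- ansible.builtin.debug:
+ msg: "Configuration {{ msk_config.arn }} revision {{ msk_config.revision }}"
+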
+- community.aws.msk_config:
+ name: kafka-cluster-configuration
+ state: absent
+"""
+
+RETURN = r"""
+# These are examples of possible return values, and in general should use other names for return values.
+
+arn:
+ description: The Amazon Resource Name (ARN) of the configuration.
+ type: str
+ returned: I(state=present)
+ sample: "arn:aws:kafka:<region>:<account>:configuration/<name>/<resource-id>"
+revision:
+ description: The revision number.
+ type: int
+ returned: I(state=present)
+ sample: 1
+server_properties:
+ description: Contents of the server.properties file.
+ type: str
+ returned: I(state=present)
+ sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000"
+response:
+ description: The response from the actual API call.
+ type: dict
+ returned: always
+ sample: {}
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ camel_dict_to_snake_dict,
+ AWSRetry,
+)
+
+
+def dict_to_prop(d):
+ """convert dictionary to multi-line properties"""
+ if len(d) == 0:
+ return ""
+ return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
+
+
+def prop_to_dict(p):
+ """convert properties to dictionary"""
+ if len(p) == 0:
+ return {}
+ r_dict = {}
+ for s in p.decode().split("\n"):
+ kv = s.split("=")
+ r_dict[kv[0].strip()] = kv[1].strip()
+ return r_dict
+ # python >= 2.7 is required:
+ # return {
+ # k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n"))
+ # }
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5)
+def get_configurations_with_backoff(client):
+ paginator = client.get_paginator("list_configurations")
+ return paginator.paginate().build_full_result()
+
+
+def find_active_config(client, module):
+ """
+ looking for configuration by name
+ """
+
+ name = module.params["name"]
+
+ try:
+ all_configs = get_configurations_with_backoff(client)["Configurations"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="failed to obtain kafka configurations")
+
+ active_configs = list(
+ item
+ for item in all_configs
+ if item["Name"] == name and item["State"] == "ACTIVE"
+ )
+
+ if active_configs:
+ if len(active_configs) == 1:
+ return active_configs[0]
+ else:
+ module.fail_json(
+ msg="found more than one active config with name '{0}'".format(name)
+ )
+
+ return None
+
+
+def get_configuration_revision(client, module, arn, revision):
+ try:
+ return client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "failed to describe kafka configuration revision")
+
+
+def is_configuration_changed(module, current):
+ """
+ compare configuration's description and properties
+ python 2.7+ version:
+ prop_module = {str(k): str(v) for k, v in module.params.get("config").items()}
+ """
+ prop_module = {}
+ for k, v in module.params.get("config").items():
+ prop_module[str(k)] = str(v)
+ if prop_to_dict(current.get("ServerProperties", "")) == prop_module:
+ if current.get("Description", "") == module.params.get("description"):
+ return False
+ return True
+
+
+def create_config(client, module):
+ """create new or update existing configuration"""
+
+ config = find_active_config(client, module)
+
+ # create new configuration
+ if not config:
+
+ if module.check_mode:
+ return True, {}
+
+ try:
+ response = client.create_configuration(
+ Name=module.params.get("name"),
+ Description=module.params.get("description"),
+ KafkaVersions=module.params.get("kafka_versions"),
+ ServerProperties=dict_to_prop(module.params.get("config")).encode(),
+ aws_retry=True
+ )
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "failed to create kafka configuration")
+
+ # update existing configuration (creates new revision)
+ else:
+ # this extra lookup is required because 'config' doesn't contain 'ServerProperties'
+ response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"])
+
+ if not is_configuration_changed(module, response):
+ return False, response
+
+ if module.check_mode:
+ return True, {}
+
+ try:
+ response = client.update_configuration(
+ Arn=config["Arn"],
+ Description=module.params.get("description"),
+ ServerProperties=dict_to_prop(module.params.get("config")).encode(),
+ aws_retry=True
+ )
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "failed to update kafka configuration")
+
+ arn = response["Arn"]
+ revision = response["LatestRevision"]["Revision"]
+
+ result = get_configuration_revision(client, module, arn=arn, revision=revision)
+
+ return True, result
+
+
+def delete_config(client, module):
+ """delete configuration"""
+
+ config = find_active_config(client, module)
+
+ if module.check_mode:
+ if config:
+ return True, config
+ else:
+ return False, {}
+
+ if config:
+ try:
+ response = client.delete_configuration(Arn=config["Arn"], aws_retry=True)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(e, "failed to delete the kafka configuration")
+ return True, response
+
+ return False, {}
+
+
+def main():
+
+ module_args = dict(
+ name=dict(type="str", required=True),
+ description=dict(type="str", default=""),
+ state=dict(choices=["present", "absent"], default="present"),
+ config=dict(type="dict", aliases=["configuration"], default={}),
+ kafka_versions=dict(type="list", elements="str"),
+ )
+
+ module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True)
+
+ client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())
+
+ if module.params["state"] == "present":
+ changed, response = create_config(client, module)
+
+ elif module.params["state"] == "absent":
+ changed, response = delete_config(client, module)
+
+ # return some placeholder values in check mode if the configuration doesn't exist;
+ # they can be useful when these options are referenced by other modules during a check mode run
+ if module.check_mode and not response.get("Arn"):
+ arn = "arn:aws:kafka:region:account:configuration/name/id"
+ revision = 1
+ server_properties = ""
+ else:
+ arn = response.get("Arn")
+ revision = response.get("Revision")
+ server_properties = response.get("ServerProperties", "")
+
+ module.exit_json(
+ changed=changed,
+ arn=arn,
+ revision=revision,
+ server_properties=server_properties,
+ response=camel_dict_to_snake_dict(response),
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall.py b/ansible_collections/community/aws/plugins/modules/networkfirewall.py
new file mode 100644
index 000000000..9bb6ebb75
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall
+short_description: manage AWS Network Firewall firewalls
+version_added: 4.0.0
+description:
+ - A module for creating, updating and deleting AWS Network Firewall firewalls.
+options:
+ arn:
+ description:
+ - The ARN of the firewall.
+ - Exactly one of I(arn) or I(name) must be provided.
+ required: false
+ type: str
+ aliases: ['firewall_arn']
+ name:
+ description:
+ - The name of the firewall.
+ - Cannot be updated after creation.
+ - Exactly one of I(arn) or I(name) must be provided.
+ required: false
+ type: str
+ aliases: ['firewall_name']
+ state:
+ description:
+ - Create or remove the firewall.
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ description:
+ description:
+ - A description for the firewall.
+ required: false
+ type: str
+ delete_protection:
+ description:
+ - When I(delete_protection=True), the firewall is protected from deletion.
+ - Defaults to C(false) when not provided on creation.
+ type: bool
+ required: false
+ policy_change_protection:
+ description:
+ - When I(policy_change_protection=True), the firewall is protected from
+ changes to which policy is attached to the firewall.
+ - Defaults to C(false) when not provided on creation.
+ type: bool
+ required: false
+ aliases: ['firewall_policy_change_protection']
+ subnet_change_protection:
+ description:
+ - When I(subnet_change_protection=True), the firewall is protected from
+ changes to which subnets are attached to the firewall.
+ - Defaults to C(false) when not provided on creation.
+ type: bool
+ required: false
+ wait:
+ description:
+ - On creation, whether to wait for the firewall to reach the C(READY)
+ state.
+ - On deletion, whether to wait for the firewall to reach the C(DELETED)
+ state.
+ - On update, whether to wait for the firewall to reach the C(IN_SYNC)
+ configuration synchronization state.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - Maximum time, in seconds, to wait for the firewall to reach the
+ expected state.
+ - Defaults to 600 seconds.
+ type: int
+ required: false
+ subnets:
+ description:
+ - The IDs of the subnets to which the firewall will be associated.
+ - Required when creating a new firewall.
+ type: list
+ elements: str
+ required: false
+ purge_subnets:
+ description:
+ - If I(purge_subnets=true), existing subnets will be removed from the
+ firewall as necessary to match exactly what is defined by I(subnets).
+ type: bool
+ required: false
+ default: true
+ policy:
+ description:
+ - The ARN of the Network Firewall policy to use for the firewall.
+ - Required when creating a new firewall.
+ type: str
+ required: false
+ aliases: ['firewall_policy_arn']
+
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Create an AWS Network Firewall
+- community.aws.networkfirewall:
+ name: 'ExampleFirewall'
+ state: present
+ policy: 'ExamplePolicy'
+ subnets:
+ - 'subnet-123456789abcdef01'
+
+# Create an AWS Network Firewall with various options, don't wait for creation
+# to finish.
+- community.aws.networkfirewall:
+ name: 'ExampleFirewall'
+ state: present
+ delete_protection: True
+ description: "An example Description"
+ policy: 'ExamplePolicy'
+ policy_change_protection: True
+ subnets:
+ - 'subnet-123456789abcdef01'
+ - 'subnet-abcdef0123456789a'
+ subnet_change_protection: True
+ tags:
+ ExampleTag: Example Value
+ another_tag: another_example
+ wait: false
+
+
+# Delete an AWS Network Firewall
+- community.aws.networkfirewall:
+ state: absent
+ name: 'ExampleFirewall'
+'''
+
+RETURN = '''
+firewall:
+ description: The full details of the firewall
+ returned: success
+ type: dict
+ contains:
+ firewall:
+ description: The details of the firewall
+ type: dict
+ returned: success
+ contains:
+ delete_protection:
+ description: A flag indicating whether it is possible to delete the firewall.
+ type: bool
+ returned: success
+ example: true
+ description:
+ description: A description of the firewall.
+ type: str
+ returned: success
+ example: "Description"
+ firewall_arn:
+ description: The ARN of the firewall.
+ type: str
+ returned: success
+ example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall"
+ firewall_id:
+ description: A unique ID for the firewall.
+ type: str
+ returned: success
+ example: "12345678-abcd-1234-abcd-123456789abc"
+ firewall_name:
+ description: The name of the firewall.
+ type: str
+ returned: success
+ example: "ExampleFirewall"
+ firewall_policy_arn:
+ description: The ARN of the firewall policy used by the firewall.
+ type: str
+ returned: success
+ example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy"
+ firewall_policy_change_protection:
+ description:
+ - A flag indicating whether it is possible to change which firewall
+ policy is used by the firewall.
+ type: bool
+ returned: success
+ example: false
+ subnet_change_protection:
+ description:
+ - A flag indicating whether it is possible to change which subnets
+ the firewall endpoints are in.
+ type: bool
+ returned: success
+ example: true
+ subnets:
+ description: A list of the subnets the firewall endpoints are in.
+ type: list
+ elements: str
+ example: ["subnet-12345678", "subnet-87654321"]
+ subnet_mappings:
+ description: A list representing the subnets the firewall endpoints are in.
+ type: list
+ elements: dict
+ contains:
+ subnet_id:
+ description: The ID of the subnet.
+ type: str
+ returned: success
+ example: "subnet-12345678"
+ tags:
+ description: The tags associated with the firewall.
+ type: dict
+ returned: success
+ example: '{"SomeTag": "SomeValue"}'
+ vpc_id:
+ description: The ID of the VPC that the firewall is used by.
+ type: str
+ returned: success
+ example: "vpc-0123456789abcdef0"
+ firewall_metadata:
+ description: Metadata about the firewall
+ type: dict
+ returned: success
+ contains:
+ configuration_sync_state_summary:
+ description:
+ - A short summary of the synchronization status of the
+ policy and rule groups.
+ type: str
+ returned: success
+ example: "IN_SYNC"
+ status:
+ description:
+ - A short summary of the status of the firewall endpoints.
+ type: str
+ returned: success
+ example: "READY"
+ sync_states:
+ description:
+ - A description, broken down by availability zone, of the status
+ of the firewall endpoints as well as the synchronization status
+ of the policies and rule groups.
+ type: dict
+ returned: success
+ example:
+ {
+ "us-east-1a": {
+ "attachment": {
+ "endpoint_id": "vpce-123456789abcdef01",
+ "status": "READY",
+ "subnet_id": "subnet-12345678"
+ },
+ "config": {
+ "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": {
+ "sync_status": "IN_SYNC",
+ "update_token": "abcdef01-0000-0000-0000-123456789abc"
+ },
+ "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": {
+ "sync_status": "IN_SYNC",
+ "update_token": "12345678-0000-0000-0000-abcdef012345"
+ }
+ }
+ }
+ }
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=False, aliases=['firewall_name']),
+ arn=dict(type='str', required=False, aliases=['firewall_arn']),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ description=dict(type='str', required=False),
+ tags=dict(type='dict', required=False, aliases=['resource_tags']),
+ purge_tags=dict(type='bool', required=False, default=True),
+ wait=dict(type='bool', required=False, default=True),
+ wait_timeout=dict(type='int', required=False),
+ subnet_change_protection=dict(type='bool', required=False),
+ policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']),
+ delete_protection=dict(type='bool', required=False),
+ subnets=dict(type='list', elements='str', required=False),
+ purge_subnets=dict(type='bool', required=False, default=True),
+ policy=dict(type='str', required=False, aliases=['firewall_policy_arn']),
+ )
+
+ mutually_exclusive = [
+ ('arn', 'name',)
+ ]
+ required_one_of = [
+ ('arn', 'name',)
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ )
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ manager = NetworkFirewallManager(module, name=name, arn=arn)
+ manager.set_wait(module.params.get('wait', None))
+ manager.set_wait_timeout(module.params.get('wait_timeout', None))
+
+ if state == 'absent':
+ manager.set_delete_protection(module.params.get('delete_protection', None))
+ manager.delete()
+ else:
+ if not manager.original_resource:
+ if not module.params.get('subnets', None):
+ module.fail_json(msg='The subnets parameter must be provided on creation.')
+ if not module.params.get('policy', None):
+ module.fail_json(msg='The policy parameter must be provided on creation.')
+ manager.set_description(module.params.get('description', None))
+ manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None))
+ manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None))
+ manager.set_policy_change_protection(module.params.get('policy_change_protection', None))
+ manager.set_delete_protection(module.params.get('delete_protection', None))
+ manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None))
+ manager.set_policy(module.params.get('policy', None))
+ manager.flush_changes()
+
+ results = dict(
+ changed=manager.changed,
+ firewall=manager.updated_resource,
+ )
+ if manager.changed:
+ diff = dict(
+ before=manager.original_resource,
+ after=manager.updated_resource,
+ )
+ results['diff'] = diff
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py
new file mode 100644
index 000000000..85df6b026
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_info
+short_description: describe AWS Network Firewall firewalls
+version_added: 4.0.0
+description:
+ - A module for describing AWS Network Firewall firewalls.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall.
+ - Mutually exclusive with I(name) and I(vpc_ids).
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall.
+ - Mutually exclusive with I(arn) and I(vpc_ids).
+ required: false
+ type: str
+ vpc_ids:
+ description:
+ - A List of VPCs to retrieve the firewalls for.
+ - Mutually exclusive with I(name) and I(arn).
+ required: false
+ type: list
+ elements: str
+ aliases: ['vpcs', 'vpc_id']
+
+author: Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+
+# Describe all firewalls in an account
+- community.aws.networkfirewall_info: {}
+
+# Describe a firewall by ARN
+- community.aws.networkfirewall_info:
+ arn: arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall
+
+# Describe a firewall by name
+- community.aws.networkfirewall_info:
+ name: ExampleFirewall
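+
+# Describe all firewalls attached to specific VPCs, using the vpc_ids option
+# documented above (the VPC ID here is an illustrative placeholder)
+- community.aws.networkfirewall_info:
+ vpc_ids:
+ - vpc-0123456789abcdef0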
+'''
+
+RETURN = '''
+firewall_list:
+ description: A list of ARNs of the matching firewalls.
+ type: list
+ elements: str
+ returned: When a firewall name or ARN isn't specified
+ example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall/Example1',
+ 'arn:aws:network-firewall:us-east-1:123456789012:firewall/Example2']
+
+firewalls:
+ description: The details of the firewalls
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ firewall:
+ description: The details of the firewall
+ type: dict
+ returned: success
+ contains:
+ delete_protection:
+ description: A flag indicating whether it is possible to delete the firewall.
+ type: bool
+ returned: success
+ example: true
+ description:
+ description: A description of the firewall.
+ type: str
+ returned: success
+ example: "Description"
+ firewall_arn:
+ description: The ARN of the firewall.
+ type: str
+ returned: success
+ example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall"
+ firewall_id:
+ description: A unique ID for the firewall.
+ type: str
+ returned: success
+ example: "12345678-abcd-1234-abcd-123456789abc"
+ firewall_name:
+ description: The name of the firewall.
+ type: str
+ returned: success
+ example: "ExampleFirewall"
+ firewall_policy_arn:
+ description: The ARN of the firewall policy used by the firewall.
+ type: str
+ returned: success
+ example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy"
+ firewall_policy_change_protection:
+ description:
+ - A flag indicating whether it is possible to change which firewall
+ policy is used by the firewall.
+ type: bool
+ returned: success
+ example: false
+ subnet_change_protection:
+ description:
+ - A flag indicating whether it is possible to change which subnets
+ the firewall endpoints are in.
+ type: bool
+ returned: success
+ example: true
+ subnet_mappings:
+ description: A list of the subnets the firewall endpoints are in.
+ type: list
+ elements: dict
+ contains:
+ subnet_id:
+ description: The ID of the subnet.
+ type: str
+ returned: success
+ example: "subnet-12345678"
+ tags:
+ description: The tags associated with the firewall.
+ type: dict
+ returned: success
+ example: '{"SomeTag": "SomeValue"}'
+ vpc_id:
+ description: The ID of the VPC that the firewall is used by.
+ type: str
+ returned: success
+ example: "vpc-0123456789abcdef0"
+ firewall_metadata:
+ description: Metadata about the firewall
+ type: dict
+ returned: success
+ contains:
+ configuration_sync_state_summary:
+ description:
+ - A short summary of the synchronization status of the
+ policy and rule groups.
+ type: str
+ returned: success
+ example: "IN_SYNC"
+ status:
+ description:
+ - A short summary of the status of the firewall endpoints.
+ type: str
+ returned: success
+ example: "READY"
+ sync_states:
+ description:
+ - A description, broken down by availability zone, of the status
+ of the firewall endpoints as well as the synchronization status
+ of the policies and rule groups.
+ type: dict
+ returned: success
+ example:
+ {
+ "us-east-1a": {
+ "attachment": {
+ "endpoint_id": "vpce-123456789abcdef01",
+ "status": "READY",
+ "subnet_id": "subnet-12345678"
+ },
+ "config": {
+ "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": {
+ "sync_status": "IN_SYNC",
+ "update_token": "abcdef01-0000-0000-0000-123456789abc"
+ },
+ "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": {
+ "sync_status": "IN_SYNC",
+ "update_token": "12345678-0000-0000-0000-abcdef012345"
+ }
+ }
+ }
+ }
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=False),
+ arn=dict(type='str', required=False),
+ vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('arn', 'name', 'vpc_ids',),
+ ],
+ )
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+ vpcs = module.params.get('vpc_ids')
+
+ manager = NetworkFirewallManager(module)
+
+ results = dict(changed=False)
+
+ if name or arn:
+ firewall = manager.get_firewall(name=name, arn=arn)
+ if firewall:
+ results['firewalls'] = [firewall]
+ else:
+ results['firewalls'] = []
+ else:
+ if vpcs:
+ firewall_list = manager.list(vpc_ids=vpcs)
+ else:
+ firewall_list = manager.list()
+ results['firewall_list'] = firewall_list
+ firewalls = [manager.get_firewall(arn=f) for f in firewall_list]
+ results['firewalls'] = firewalls
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py
new file mode 100644
index 000000000..1026138a6
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_policy
+short_description: manage AWS Network Firewall policies
+version_added: 4.0.0
+description:
+ - A module for creating, updating and deleting AWS Network Firewall policies.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall policy.
+ - Exactly one of I(arn) or I(name) must be provided.
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall policy.
+ - Cannot be updated after creation.
+ - Exactly one of I(arn) or I(name) must be provided.
+ required: false
+ type: str
+ state:
+ description:
+ - Create or remove the Network Firewall policy.
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ description:
+ description:
+ - A description for the Network Firewall policy.
+ required: false
+ type: str
+ stateful_rule_groups:
+ description:
+ - A list of names or ARNs of stateful firewall rule groups.
+ required: false
+ type: list
+ elements: str
+ aliases: ['stateful_groups']
+ stateless_rule_groups:
+ description:
+ - A list of names or ARNs of stateless firewall rule groups.
+ required: false
+ type: list
+ elements: str
+ aliases: ['stateless_groups']
+ stateless_default_actions:
+ description:
+ - Actions to take on a packet if it doesn't match any of the stateless
+ rules in the policy.
+ - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe).
+ - When creating a new policy defaults to C(aws:forward_to_sfe).
+ required: false
+ type: list
+ elements: str
+ stateless_fragment_default_actions:
+ description:
+ - Actions to take on a fragmented UDP packet if it doesn't match any
+ of the stateless rules in the policy.
+ - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe).
+ - When creating a new policy defaults to C(aws:forward_to_sfe).
+ required: false
+ type: list
+ elements: str
+ stateful_default_actions:
+ description:
+ - Actions to take on a packet if it doesn't match any of the stateful
+ rules in the policy.
+ - Common actions are C(aws:drop_strict), C(aws:drop_established),
+ C(aws:alert_strict) and C(aws:alert_established).
+ - Only valid for policies where I(stateful_rule_order='strict').
+ - When creating a new policy defaults to C(aws:drop_strict).
+ - I(stateful_default_actions) requires botocore>=1.21.52.
+ required: false
+ type: list
+ elements: str
+ stateful_rule_order:
+ description:
+ - Indicates how to manage the order of stateful rule evaluation for the policy.
+ - When I(stateful_rule_order='strict'), rules and rule groups are evaluated in
+ the order that they're defined.
+ - Cannot be updated after creation.
+ - I(stateful_rule_order) requires botocore>=1.21.52.
+ required: false
+ type: str
+ choices: ['default', 'strict']
+ aliases: ['rule_order']
+ stateless_custom_actions:
+ description:
+ - A list of dictionaries defining custom actions which can be used in
+ I(stateless_default_actions) and I(stateless_fragment_default_actions).
+ required: false
+ type: list
+ elements: dict
+ aliases: ['custom_stateless_actions']
+ suboptions:
+ name:
+ description:
+ - The name of the custom action.
+ required: true
+ type: str
+ publish_metric_dimension_value:
+ description:
+ - When the custom action is used, metrics will have a dimension of
+ C(CustomAction) the value of which is set to
+ I(publish_metric_dimension_value).
+ required: false
+ type: str
+ aliases: ['publish_metric_dimension_values']
+ purge_stateless_custom_actions:
+ description:
+ - If I(purge_stateless_custom_actions=true), existing custom actions will
+ be purged from the resource to match exactly what is defined by
+ the I(stateless_custom_actions) parameter.
+ type: bool
+ required: false
+ default: True
+ aliases: ['purge_custom_stateless_actions']
+ wait:
+ description:
+ - Whether to wait for the firewall policy to reach the
+ C(ACTIVE) or C(DELETED) state before the module returns.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - Maximum time, in seconds, to wait for the firewall policy
+ to reach the expected state.
+ - Defaults to 600 seconds.
+ type: int
+ required: false
+
+
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Create an AWS Network Firewall Policy with default rule order
+- community.aws.networkfirewall_policy:
+ stateful_rule_order: 'default'
+ state: present
+ name: 'ExamplePolicy'
+
+# Create an AWS Network Firewall Policy with strict rule order
+- community.aws.networkfirewall_policy:
+ stateful_rule_order: 'strict'
+ state: present
+ name: 'ExampleStrictPolicy'
+
+
+# Create an AWS Network Firewall Policy that defaults to dropping all packets
+- community.aws.networkfirewall_policy:
+ stateful_rule_order: 'strict'
+ state: present
+ name: 'ExampleDropPolicy'
+ stateful_default_actions:
+ - 'aws:drop_strict'
+ stateful_rule_groups:
+ - 'ExampleStrictRuleGroup'
+ - 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/BotNetCommandAndControlDomainsStrictOrder'
+
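+# Create a policy that publishes a CloudWatch metric via a custom stateless
+# action referenced from the default actions. A minimal sketch; the policy,
+# action and dimension names are illustrative.
+- community.aws.networkfirewall_policy:
+ name: 'ExampleMetricPolicy'
+ state: present
+ stateless_custom_actions:
+ - name: 'ExampleMetricAction'
+ publish_metric_dimension_value: 'ExampleRule'
+ stateless_default_actions:
+ - 'ExampleMetricAction'
+ - 'aws:forward_to_sfe'
+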
+# Delete an AWS Network Firewall Policy
+- community.aws.networkfirewall_policy:
+ state: absent
+ name: 'ExampleDropPolicy'
+'''
+
+RETURN = '''
+policy:
+ description: The details of the policy
+ type: dict
+ returned: success
+ contains:
+ policy:
+ description: The details of the policy
+ type: dict
+ returned: success
+ contains:
+ stateful_engine_options:
+ description:
+ - Extra options describing how the stateful rules should be handled.
+ type: dict
+ returned: success
+ contains:
+ rule_order:
+ description:
+ - How rule group evaluation will be ordered.
+ - For more information on rule evaluation ordering see the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html).
+ type: str
+ returned: success
+ example: 'DEFAULT_ACTION_ORDER'
+ stateful_rule_group_references:
+ description: Information about the stateful rule groups attached to the policy.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ resource_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder'
+ priority:
+ description:
+ - An integer that indicates the order in which to run the stateful rule groups in a single policy.
+ - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings.
+ type: int
+ returned: success
+ example: 1234
+ stateless_custom_actions:
+ description:
+ - A description of additional custom actions available for use as
+ default rules to apply to stateless packets.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ action_name:
+ description: A name for the action.
+ type: str
+ returned: success
+ example: 'ExampleAction'
+ action_definition:
+ description: The action to perform.
+ type: dict
+ returned: success
+ contains:
+ publish_metric_action:
+ description:
+ - Definition of a custom metric to be published to CloudWatch.
+ - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html)
+ type: dict
+ returned: success
+ contains:
+ dimensions:
+ description:
+ - The values of the CustomAction dimension to set on the metrics.
+ - The dimensions of a metric are used to identify unique
+ streams of data.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ value:
+ description: A value of the CustomAction dimension to set on the metrics.
+ type: str
+ returned: success
+ example: 'ExampleRule'
+ stateless_default_actions:
+ description: The default actions to take on a packet that doesn't match any stateless rules.
+ type: list
+ elements: str
+ returned: success
+ example: ['aws:alert_strict']
+ stateless_fragment_default_actions:
+ description: The actions to take on a packet if it doesn't match any of the stateless rules in the policy.
+ type: list
+ elements: str
+ returned: success
+ example: ['aws:pass']
+ stateless_rule_group_references:
+ description: Information about the stateless rule groups attached to the policy.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ resource_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup'
+ priority:
+ description:
+ - An integer that indicates the order in which to run the stateless rule groups in a single policy.
+ type: int
+ returned: success
+ example: 12345
+ policy_metadata:
+ description: Metadata about the policy
+ type: dict
+ returned: success
+ contains:
+ consumed_stateful_rule_capacity:
+ description: The total number of capacity units used by the stateful rule groups.
+ type: int
+ returned: success
+ example: 165
+ consumed_stateless_rule_capacity:
+ description: The total number of capacity units used by the stateless rule groups.
+ type: int
+ returned: success
+ example: 2010
+ firewall_policy_arn:
+ description: The ARN of the policy.
+ type: str
+ returned: success
+ example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy
+ firewall_policy_id:
+ description: The unique ID of the policy.
+ type: str
+ returned: success
+ example: 12345678-abcd-1234-5678-123456789abc
+ firewall_policy_name:
+ description: The name of the policy.
+ type: str
+ returned: success
+ example: ExamplePolicy
+ firewall_policy_status:
+ description: The current status of the policy.
+ type: str
+ returned: success
+ example: ACTIVE
+ number_of_associations:
+ description: The number of firewalls the policy is associated to.
+ type: int
+ returned: success
+ example: 1
+ tags:
+ description: A dictionary representing the tags associated with the policy.
+ type: dict
+ returned: success
+ example: {'tagName': 'Some Value'}
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
+
+
+def main():
+
+ custom_action_options = dict(
+ name=dict(type='str', required=True),
+ # Poorly documented, but "publishMetricAction.dimensions ... must have length less than or equal to 1"
+ publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']),
+ # NetworkFirewallPolicyManager can cope with a list for future-proofing
+ # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']),
+ )
+
+ argument_spec = dict(
+ name=dict(type='str', required=False),
+ arn=dict(type='str', required=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ description=dict(type='str', required=False),
+ tags=dict(type='dict', required=False, aliases=['resource_tags']),
+ purge_tags=dict(type='bool', required=False, default=True),
+ stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']),
+ stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']),
+ stateful_default_actions=dict(type='list', elements='str', required=False),
+ stateless_default_actions=dict(type='list', elements='str', required=False),
+ stateless_fragment_default_actions=dict(type='list', elements='str', required=False),
+ stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']),
+ stateless_custom_actions=dict(type='list', elements='dict', required=False,
+ options=custom_action_options, aliases=['custom_stateless_actions']),
+ purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']),
+ wait=dict(type='bool', required=False, default=True),
+ wait_timeout=dict(type='int', required=False),
+ )
+
+ mutually_exclusive = [
+ ('arn', 'name',)
+ ]
+ required_one_of = [
+ ('arn', 'name',)
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ )
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ manager = NetworkFirewallPolicyManager(module, name=name, arn=arn)
+ manager.set_wait(module.params.get('wait', None))
+ manager.set_wait_timeout(module.params.get('wait_timeout', None))
+
+ rule_order = module.params.get('stateful_rule_order')
+ if rule_order and rule_order != "default":
+ module.require_botocore_at_least('1.21.52', reason='to set the rule order')
+ if module.params.get('stateful_default_actions'):
+ module.require_botocore_at_least(
+ '1.21.52', reason='to set the default actions for stateful flows')
+
+ if state == 'absent':
+ manager.delete()
+ else:
+ manager.set_description(module.params.get('description', None))
+ manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None))
+ # Actions need to be defined before potentially consuming them
+ manager.set_custom_stateless_actions(
+ module.params.get('stateless_custom_actions', None),
+            module.params.get('purge_stateless_custom_actions', True))
+ manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None))
+ manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None))
+ manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None))
+ manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None))
+ manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None))
+ manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None))
+
+ manager.flush_changes()
+
+ results = dict(
+ changed=manager.changed,
+ policy=manager.updated_resource,
+ )
+ if manager.changed:
+ diff = dict(
+ before=manager.original_resource,
+ after=manager.updated_resource,
+ )
+ results['diff'] = diff
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py
new file mode 100644
index 000000000..1f170f5b3
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_policy_info
+short_description: describe AWS Network Firewall policies
+version_added: 4.0.0
+description:
+ - A module for describing AWS Network Firewall policies.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall policy.
+ - Mutually exclusive with I(name).
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall policy.
+ - Mutually exclusive with I(arn).
+ required: false
+ type: str
+
+author: Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+
+# Describe all Firewall policies in an account
+- community.aws.networkfirewall_policy_info: {}
+
+# Describe a Firewall policy by ARN
+- community.aws.networkfirewall_policy_info:
+ arn: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy
+
+# Describe a Firewall policy by name
+- community.aws.networkfirewall_policy_info:
+ name: ExamplePolicy
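+
+# (Illustrative sketch) Register the results and inspect the returned data;
+# the 'all_policies' variable name is illustrative, not part of the module.
+- community.aws.networkfirewall_policy_info: {}
+  register: all_policies
+
+- ansible.builtin.debug:
+    msg: "{{ all_policies.policy_list }}"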
+'''
+
+RETURN = '''
+policy_list:
+ description: A list of ARNs of the matching policies.
+ type: list
+ elements: str
+ returned: When a policy name isn't specified
+ example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example1',
+ 'arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example2']
+
+policies:
+ description: The details of the policies
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ policy:
+ description: The details of the policy
+ type: dict
+ returned: success
+ contains:
+ stateful_engine_options:
+ description:
+ - Extra options describing how the stateful rules should be handled.
+ type: dict
+ returned: success
+ contains:
+ rule_order:
+ description:
+ - How rule group evaluation will be ordered.
+ - For more information on rule evaluation ordering see the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html).
+ type: str
+ returned: success
+ example: 'DEFAULT_ACTION_ORDER'
+ stateful_rule_group_references:
+ description: Information about the stateful rule groups attached to the policy.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ resource_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder'
+ priority:
+ description:
+ - An integer that indicates the order in which to run the stateful rule groups in a single policy.
+ - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings.
+ type: int
+ returned: success
+ example: 1234
+ stateless_custom_actions:
+ description:
+ - A description of additional custom actions available for use as
+ default rules to apply to stateless packets.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ action_name:
+ description: A name for the action.
+ type: str
+ returned: success
+ example: 'ExampleAction'
+ action_definition:
+ description: The action to perform.
+ type: dict
+ returned: success
+ contains:
+ publish_metric_action:
+ description:
+ - Definition of a custom metric to be published to CloudWatch.
+ - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html)
+ type: dict
+ returned: success
+ contains:
+ dimensions:
+ description:
+ - The values of the CustomAction dimension to set on the metrics.
+ - The dimensions of a metric are used to identify unique
+ streams of data.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ value:
+ description: A value of the CustomAction dimension to set on the metrics.
+ type: str
+ returned: success
+ example: 'ExampleRule'
+ stateless_default_actions:
+          description: The default actions to take on a packet that doesn't match any stateless rules.
+          type: list
+          elements: str
+          returned: success
+          example: ['aws:forward_to_sfe']
+        stateless_fragment_default_actions:
+          description: The actions to take on a fragmented packet if it doesn't match any of the stateless rules in the policy.
+ type: list
+ elements: str
+ returned: success
+ example: ['aws:pass']
+ stateless_rule_group_references:
+          description: Information about the stateless rule groups attached to the policy.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ resource_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup'
+ priority:
+ description:
+ - An integer that indicates the order in which to run the stateless rule groups in a single policy.
+              type: int
+ returned: success
+ example: 12345
+ policy_metadata:
+ description: Metadata about the policy
+ type: dict
+ returned: success
+ contains:
+ consumed_stateful_rule_capacity:
+ description: The total number of capacity units used by the stateful rule groups.
+ type: int
+ returned: success
+ example: 165
+ consumed_stateless_rule_capacity:
+ description: The total number of capacity units used by the stateless rule groups.
+ type: int
+ returned: success
+ example: 2010
+ firewall_policy_arn:
+ description: The ARN of the policy.
+ type: str
+ returned: success
+ example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy
+ firewall_policy_id:
+ description: The unique ID of the policy.
+ type: str
+ returned: success
+ example: 12345678-abcd-1234-5678-123456789abc
+ firewall_policy_name:
+ description: The name of the policy.
+ type: str
+ returned: success
+ example: ExamplePolicy
+ firewall_policy_status:
+ description: The current status of the policy.
+ type: str
+ returned: success
+ example: ACTIVE
+ number_of_associations:
+          description: The number of firewalls the policy is associated with.
+ type: int
+ returned: success
+ example: 1
+ tags:
+ description: A dictionary representing the tags associated with the policy.
+ type: dict
+ returned: success
+ example: {'tagName': 'Some Value'}
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=False),
+ arn=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('arn', 'name',),
+ ],
+ )
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+
+ manager = NetworkFirewallPolicyManager(module)
+
+ results = dict(changed=False)
+
+ if name or arn:
+ policy = manager.get_policy(name=name, arn=arn)
+ if policy:
+ results['policies'] = [policy]
+ else:
+ results['policies'] = []
+ else:
+ policy_list = manager.list()
+ results['policy_list'] = policy_list
+ policies = [manager.get_policy(arn=p) for p in policy_list]
+ results['policies'] = policies
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py
new file mode 100644
index 000000000..c8e2ea38b
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py
@@ -0,0 +1,831 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_rule_group
+short_description: create, delete and modify AWS Network Firewall rule groups
+version_added: 4.0.0
+description:
+ - A module for managing AWS Network Firewall rule groups.
+ - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/index.html)
+ - Currently only supports C(stateful) firewall groups.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall rule group.
+ - Exactly one of I(arn) and I(name) must be provided.
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall rule group.
+ - When I(name) is set, I(rule_type) must also be set.
+ required: false
+ type: str
+ rule_type:
+ description:
+ - Indicates whether the rule group is stateless or stateful.
+ - Stateless rulesets are currently not supported.
+ - Required if I(name) is set.
+ required: false
+    aliases: ['type']
+ choices: ['stateful']
+# choices: ['stateful', 'stateless']
+ type: str
+ state:
+ description:
+ - Create or remove the Network Firewall rule group.
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ capacity:
+ description:
+ - The maximum operating resources that this rule group can use.
+ - Once a rule group is created this parameter is immutable.
+      - See also the AWS documentation about how capacity is calculated
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/nwfw-rule-group-capacity.html)
+ - This option is mandatory when creating a new rule group.
+ type: int
+ required: false
+ rule_order:
+ description:
+ - Indicates how to manage the order of the rule evaluation for the rule group.
+ - Once a rule group is created this parameter is immutable.
+ - Mutually exclusive with I(rule_type=stateless).
+ - For more information on how rules are evaluated read the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html).
+ - I(rule_order) requires botocore>=1.23.23.
+ type: str
+ required: false
+ choices: ['default', 'strict']
+ aliases: ['stateful_rule_order']
+ description:
+ description:
+ - A description of the AWS Network Firewall rule group.
+ type: str
+ ip_variables:
+ description:
+ - A dictionary mapping variable names to a list of IP addresses and address ranges, in CIDR notation.
+ - For example C({EXAMPLE_HOSTS:["192.0.2.0/24", "203.0.113.42"]}).
+ - Mutually exclusive with I(domain_list).
+ type: dict
+ required: false
+ aliases: ['ip_set_variables']
+ purge_ip_variables:
+ description:
+ - Whether to purge variable names not mentioned in the I(ip_variables)
+ dictionary.
+ - To remove all IP Set Variables it is necessary to explicitly set I(ip_variables={})
+        and I(purge_ip_variables=true).
+ type: bool
+ default: true
+ required: false
+ aliases: ['purge_ip_set_variables']
+ port_variables:
+ description:
+ - A dictionary mapping variable names to a list of ports.
+ - For example C({SECURE_PORTS:["22", "443"]}).
+ type: dict
+ required: false
+ aliases: ['port_set_variables']
+ purge_port_variables:
+ description:
+ - Whether to purge variable names not mentioned in the I(port_variables)
+ dictionary.
+ - To remove all Port Set Variables it is necessary to explicitly set I(port_variables={})
+ and I(purge_port_variables=true).
+ type: bool
+ required: false
+ default: true
+ aliases: ['purge_port_set_variables']
+ rule_strings:
+ description:
+ - Rules in Suricata format.
+ - If I(rule_strings) is specified, it must include at least one entry.
+ - For more information read the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html)
+ and the Suricata documentation
+ U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html).
+ - Mutually exclusive with I(rule_type=stateless).
+ - Mutually exclusive with I(domain_list) and I(rule_list).
+ - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be
+ specified at creation time.
+ type: list
+ elements: str
+ required: false
+ domain_list:
+ description:
+ - Inspection criteria for a domain list rule group.
+ - When set overwrites all Domain List settings with the new configuration.
+ - For more information about domain name based filtering
+ read the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-domain-names.html).
+ - Mutually exclusive with I(rule_type=stateless).
+ - Mutually exclusive with I(ip_variables), I(rule_list) and I(rule_strings).
+ - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be
+ specified at creation time.
+ type: dict
+ required: false
+ suboptions:
+ domain_names:
+ description:
+ - A list of domain names to look for in the traffic flow.
+ type: list
+ elements: str
+ required: true
+ filter_http:
+ description:
+ - Whether HTTP traffic should be inspected (uses the host header).
+ type: bool
+ required: false
+ default: false
+ filter_https:
+ description:
+ - Whether HTTPS traffic should be inspected (uses the SNI).
+ type: bool
+ required: false
+ default: false
+ action:
+ description:
+ - Action to perform on traffic that matches the rule match settings.
+ type: str
+ required: true
+ choices: ['allow', 'deny']
+ source_ips:
+ description:
+ - Used to expand the local network definition beyond the CIDR range
+ of the VPC where you deploy Network Firewall.
+ type: list
+ elements: str
+ required: false
+ rule_list:
+ description:
+ - Inspection criteria to be used for a 5-tuple based rule group.
+ - When set overwrites all existing 5-tuple rules with the new configuration.
+ - Mutually exclusive with I(domain_list) and I(rule_strings).
+ - Mutually exclusive with I(rule_type=stateless).
+ - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be
+ specified at creation time.
+ - For more information about valid values see the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_StatefulRule.html)
+ and
+ U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_Header.html).
+      - 'Note: Idempotency when comparing AWS Web UI and Ansible managed rules cannot be guaranteed.'
+ type: list
+ elements: dict
+ required: false
+ aliases: ['stateful_rule_list']
+ suboptions:
+ action:
+ description:
+ - What Network Firewall should do with the packets in a traffic flow when the flow matches.
+ type: str
+ required: true
+ choices: ['pass', 'drop', 'alert']
+ protocol:
+ description:
+ - The protocol to inspect for. To specify all, you can use C(IP), because all traffic on AWS is C(IP).
+ type: str
+ required: true
+ source:
+ description:
+ - The source IP address or address range to inspect for, in CIDR notation.
+ - To match with any address, specify C(ANY).
+ type: str
+ required: true
+ source_port:
+ description:
+ - The source port to inspect for.
+ - To match with any port, specify C(ANY).
+ type: str
+ required: true
+ direction:
+ description:
+ - The direction of traffic flow to inspect.
+ - If set to C(any), the inspection matches both traffic going from the
+ I(source) to the I(destination) and from the I(destination) to the
+ I(source).
+ - If set to C(forward), the inspection only matches traffic going from the
+ I(source) to the I(destination).
+ type: str
+ required: false
+ default: 'forward'
+ choices: ['forward', 'any']
+ destination:
+ description:
+ - The destination IP address or address range to inspect for, in CIDR notation.
+ - To match with any address, specify C(ANY).
+ type: str
+ required: true
+ destination_port:
+ description:
+          - The destination port to inspect for.
+ - To match with any port, specify C(ANY).
+ type: str
+ required: true
+ sid:
+ description:
+ - The signature ID of the rule.
+ - A unique I(sid) must be passed for all rules.
+ type: int
+ required: true
+ rule_options:
+ description:
+ - Additional options for the rule.
+ - 5-tuple based rules are converted by AWS into Suricata rules, for more
+ complex options requirements where order matters consider using I(rule_strings).
+ - A dictionary mapping Suricata RuleOptions names to a list of values.
+ - The examples section contains some examples of using rule_options.
+ - For more information read the AWS documentation
+ U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html)
+ and the Suricata documentation
+ U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html).
+ type: dict
+ required: false
+ wait:
+ description:
+ - Whether to wait for the firewall rule group to reach the
+ C(ACTIVE) or C(DELETED) state before the module returns.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - Maximum time, in seconds, to wait for the firewall rule group
+ to reach the expected state.
+ - Defaults to 600 seconds.
+ type: int
+ required: false
+
+
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = '''
+# Create a rule group
+- name: Create a minimal AWS Network Firewall Rule Group
+ community.aws.networkfirewall_rule_group:
+ name: 'MinimalGroup'
+ type: 'stateful'
+ capacity: 200
+ rule_strings:
+ - 'pass tcp any any -> any any (sid:1000001;)'
+
+# Create an example rule group using rule_list
+- name: Create 5-tuple Rule List based rule group
+ community.aws.networkfirewall_rule_group:
+ name: 'ExampleGroup'
+ type: 'stateful'
+ description: 'My description'
+ rule_order: default
+ capacity: 100
+ rule_list:
+ - sid: 1
+ direction: forward
+ action: pass
+ protocol: IP
+ source: any
+ source_port: any
+ destination: any
+ destination_port: any
+
+# Create an example rule group using rule_list and IP/Port set variables
+- name: Create 5-tuple Rule List based rule group with variables
+ community.aws.networkfirewall_rule_group:
+ name: 'ExampleGroup'
+ type: 'stateful'
+ description: 'My description'
+ ip_variables:
+ SOURCE_IPS: ['203.0.113.0/24', '198.51.100.42']
+ DESTINATION_IPS: ['192.0.2.0/24', '198.51.100.48']
+ port_variables:
+ HTTP_PORTS: [80, 8080]
+ rule_order: default
+ capacity: 100
+ rule_list:
+ # Allow 'Destination Unreachable' traffic
+ - sid: 1
+ action: pass
+ protocol: icmp
+ source: any
+ source_port: any
+ destination: any
+ destination_port: any
+ rule_options:
+ itype: 3
+ - sid: 2
+ action: drop
+ protocol: tcp
+ source: "$SOURCE_IPS"
+ source_port: any
+ destination: "$DESTINATION_IPS"
+ destination_port: "$HTTP_PORTS"
+ rule_options:
+ urilen: ["20<>40"]
+        # Where only a keyword is needed, add the keyword, but no value
+ http_uri:
+ # Settings where Suricata expects raw strings (like the content
+ # keyword) will need to have the double-quotes explicitly escaped and
+ # passed because there's no practical way to distinguish between them
+ # and flags.
+ content: '"index.php"'
+
+# Create an example rule group using Suricata rule strings
+- name: Create Suricata rule string based rule group
+ community.aws.networkfirewall_rule_group:
+ name: 'ExampleSuricata'
+ type: 'stateful'
+ description: 'My description'
+ capacity: 200
+ ip_variables:
+ EXAMPLE_IP: ['203.0.113.0/24', '198.51.100.42']
+ ANOTHER_EXAMPLE: ['192.0.2.0/24', '198.51.100.48']
+ port_variables:
+ EXAMPLE_PORT: [443, 22]
+ rule_strings:
+ - 'pass tcp any any -> $EXAMPLE_IP $EXAMPLE_PORT (sid:1000001;)'
+ - 'pass udp any any -> $ANOTHER_EXAMPLE any (sid:1000002;)'
+
+# Create an example Domain List based rule group
+- name: Create Domain List based rule group
+ community.aws.networkfirewall_rule_group:
+ name: 'ExampleDomainList'
+ type: 'stateful'
+ description: 'My description'
+ capacity: 100
+ domain_list:
+ domain_names:
+ - 'example.com'
+ - '.example.net'
+ filter_https: True
+ filter_http: True
+ action: allow
+      source_ips: ['192.0.2.0/24']
+
+# Update the description of a rule group
+- name: Update the description of a rule group
+ community.aws.networkfirewall_rule_group:
+ name: 'MinimalGroup'
+ type: 'stateful'
+ description: 'Another description'
+
+# Update IP Variables for a rule group
+- name: Update IP Variables
+ community.aws.networkfirewall_rule_group:
+ name: 'ExampleGroup'
+ type: 'stateful'
+ ip_variables:
+ EXAMPLE_IP: ['192.0.2.0/24', '203.0.113.0/24', '198.51.100.42']
+ purge_ip_variables: false
+
+# Delete a rule group
+- name: Delete a rule group
+ community.aws.networkfirewall_rule_group:
+ name: 'MinimalGroup'
+ type: 'stateful'
+ state: absent
+
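+# (Illustrative sketch) A rule group can also be addressed by ARN instead of
+# name/type; the ARN below is a placeholder value.
+- name: Update the description of a rule group by ARN
+  community.aws.networkfirewall_rule_group:
+    arn: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup'
+    description: 'Updated description'
+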
+'''
+
+RETURN = '''
+rule_group:
+  description: Details of the rule group
+ type: dict
+ returned: success
+ contains:
+ rule_group:
+ description: Details of the rules in the rule group
+ type: dict
+ returned: success
+ contains:
+ rule_variables:
+ description: Settings that are available for use in the rules in the rule group.
+ returned: When rule variables are attached to the rule group.
+ type: complex
+ contains:
+ ip_sets:
+ description: A dictionary mapping variable names to IP addresses in CIDR format.
+ returned: success
+ type: dict
+ example: ['192.0.2.0/24']
+ port_sets:
+ description: A dictionary mapping variable names to ports
+ returned: success
+ type: dict
+ example: ['42']
+ stateful_rule_options:
+ description: Additional options governing how Network Firewall handles stateful rules.
+ returned: When the rule group is either "rules string" or "rules list" based.
+ type: dict
+ contains:
+ rule_order:
+ description: The order in which rules will be evaluated.
+ returned: success
+ type: str
+ example: 'DEFAULT_ACTION_ORDER'
+ rules_source:
+ description: Inspection criteria used for a 5-tuple based rule group.
+ returned: success
+ type: dict
+ contains:
+ stateful_rules:
+ description: A list of dictionaries describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules list" based.
+ type: list
+ elements: dict
+ contains:
+ action:
+ description: What action to perform when a flow matches the rule criteria.
+ returned: success
+ type: str
+ example: 'PASS'
+ header:
+ description: A description of the criteria used for the rule.
+ returned: success
+ type: dict
+ contains:
+ protocol:
+ description: The protocol to inspect for.
+ returned: success
+ type: str
+ example: 'IP'
+ source:
+ description: The source address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '203.0.113.98'
+ source_port:
+ description: The source port to inspect for.
+ returned: success
+ type: str
+ example: '42'
+ destination:
+ description: The destination address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '198.51.100.0/24'
+ destination_port:
+ description: The destination port to inspect for.
+ returned: success
+ type: str
+ example: '6666:6667'
+ direction:
+ description: The direction of traffic flow to inspect.
+ returned: success
+ type: str
+ example: 'FORWARD'
+ rule_options:
+ description: Additional Suricata RuleOptions settings for the rule.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ keyword:
+ description: The keyword for the setting.
+ returned: success
+ type: str
+ example: 'sid:1'
+ settings:
+ description: A list of values passed to the setting.
+ returned: When values are available
+ type: list
+ elements: str
+ rules_string:
+ description: A string describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules string" based.
+ type: str
+ rules_source_list:
+ description: A description of the criteria for a domain list rule group.
+ returned: When the rule group is "domain list" based.
+ type: dict
+ contains:
+ targets:
+ description: A list of domain names to be inspected for.
+ returned: success
+ type: list
+ elements: str
+ example: ['abc.example.com', '.example.net']
+ target_types:
+ description: The protocols to be inspected by the rule group.
+ returned: success
+ type: list
+ elements: str
+ example: ['TLS_SNI', 'HTTP_HOST']
+ generated_rules_type:
+ description: Whether the rule group allows or denies access to the domains in the list.
+ returned: success
+ type: str
+ example: 'ALLOWLIST'
+ stateless_rules_and_custom_actions:
+ description: A description of the criteria for a stateless rule group.
+ returned: When the rule group is a stateless rule group.
+ type: dict
+ contains:
+ stateless_rules:
+ description: A list of stateless rules for use in a stateless rule group.
+ type: list
+ elements: dict
+ contains:
+ rule_definition:
+ description: Describes the stateless 5-tuple inspection criteria and actions for the rule.
+ returned: success
+ type: dict
+ contains:
+ match_attributes:
+ description: Describes the stateless 5-tuple inspection criteria for the rule.
+ returned: success
+ type: dict
+ contains:
+ sources:
+ description: The source IP addresses and address ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ address_definition:
+ description: An IP address or a block of IP addresses in CIDR notation.
+ returned: success
+ type: str
+ example: '192.0.2.3'
+ destinations:
+ description: The destination IP addresses and address ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ address_definition:
+ description: An IP address or a block of IP addresses in CIDR notation.
+ returned: success
+ type: str
+ example: '192.0.2.3'
+ source_ports:
+ description: The source port ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: The lower limit of the port range.
+ returned: success
+ type: int
+ to_port:
+ description: The upper limit of the port range.
+ returned: success
+ type: int
+ destination_ports:
+ description: The destination port ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: The lower limit of the port range.
+ returned: success
+ type: int
+ to_port:
+ description: The upper limit of the port range.
+ returned: success
+ type: int
+ protocols:
+ description: The IANA protocol numbers of the protocols to inspect for.
+ returned: success
+ type: list
+ elements: int
+ example: [6]
+ tcp_flags:
+ description: The TCP flags and masks to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ flags:
+ description: Used with masks to define the TCP flags that flows are inspected for.
+ returned: success
+ type: list
+ elements: str
+ masks:
+ description: The set of flags considered during inspection.
+ returned: success
+ type: list
+ elements: str
+ actions:
+ description: The actions to take when a flow matches the rule.
+ returned: success
+ type: list
+ elements: str
+ example: ['aws:pass', 'CustomActionName']
+ priority:
+ description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group.
+ returned: success
+ type: int
+ custom_actions:
+ description: A list of individual custom action definitions that are available for use in stateless rules.
+ type: list
+ elements: dict
+ contains:
+ action_name:
+ description: The name for the custom action.
+ returned: success
+ type: str
+ action_definition:
+ description: The custom action associated with the action name.
+ returned: success
+ type: dict
+ contains:
+ publish_metric_action:
+ description: The description of an action which publishes to CloudWatch.
+ returned: When the action publishes to CloudWatch.
+ type: dict
+ contains:
+ dimensions:
+ description: The value to use in an Amazon CloudWatch custom metric dimension.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The value to use in the custom metric dimension.
+ returned: success
+ type: str
+ rule_group_metadata:
+      description: Metadata about the rule group
+ type: dict
+ returned: success
+ contains:
+ capacity:
+ description: The maximum operating resources that this rule group can use.
+ type: int
+ returned: success
+ consumed_capacity:
+ description: The number of capacity units currently consumed by the rule group rules.
+ type: int
+ returned: success
+ description:
+ description: A description of the rule group.
+ type: str
+ returned: success
+ number_of_associations:
+ description: The number of firewall policies that use this rule group.
+ type: int
+ returned: success
+ rule_group_arn:
+ description: The ARN for the rule group
+          type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup'
+ rule_group_id:
+ description: A unique identifier for the rule group.
+          type: str
+ returned: success
+ example: '12345678-abcd-1234-abcd-123456789abc'
+ rule_group_name:
+ description: The name of the rule group.
+ type: str
+ returned: success
+ rule_group_status:
+ description: The current status of a rule group.
+ type: str
+ returned: success
+ example: 'DELETING'
+ tags:
+ description: A dictionary representing the tags associated with the rule group.
+ type: dict
+ returned: success
+ type:
+ description: Whether the rule group is stateless or stateful.
+ type: str
+ returned: success
+ example: 'STATEFUL'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
+
+
+def main():
+
+ domain_list_spec = dict(
+ domain_names=dict(type='list', elements='str', required=True),
+ filter_http=dict(type='bool', required=False, default=False),
+ filter_https=dict(type='bool', required=False, default=False),
+ action=dict(type='str', required=True, choices=['allow', 'deny']),
+ source_ips=dict(type='list', elements='str', required=False),
+ )
+
+ rule_list_spec = dict(
+ action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']),
+ protocol=dict(type='str', required=True),
+ source=dict(type='str', required=True),
+ source_port=dict(type='str', required=True),
+ direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']),
+ destination=dict(type='str', required=True),
+ destination_port=dict(type='str', required=True),
+ sid=dict(type='int', required=True),
+ rule_options=dict(type='dict', required=False),
+ )
+
+ argument_spec = dict(
+ arn=dict(type='str', required=False),
+ name=dict(type='str', required=False),
+ rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']),
+ # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ capacity=dict(type='int', required=False),
+ rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']),
+ description=dict(type='str', required=False),
+ ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']),
+ purge_ip_variables=dict(type='bool', required=False, aliases=['purge_ip_set_variables'], default=True),
+ port_variables=dict(type='dict', required=False, aliases=['port_set_variables']),
+ purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True),
+ rule_strings=dict(type='list', elements='str', required=False),
+ domain_list=dict(type='dict', options=domain_list_spec, required=False),
+ rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False),
+ tags=dict(type='dict', required=False, aliases=['resource_tags']),
+ purge_tags=dict(type='bool', required=False, default=True),
+ wait=dict(type='bool', required=False, default=True),
+ wait_timeout=dict(type='int', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('name', 'arn'),
+ ('rule_strings', 'domain_list', 'rule_list'),
+ ('domain_list', 'ip_variables'),
+ ],
+ required_together=[
+ ('name', 'rule_type'),
+ ],
+ required_one_of=[
+ ('name', 'arn'),
+ ],
+ )
+
+ module.require_botocore_at_least('1.19.20')
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+ arn = module.params.get('arn')
+ rule_type = module.params.get('rule_type')
+
+ if rule_type == 'stateless':
+ if module.params.get('rule_order'):
+ module.fail_json('rule_order can not be set for stateless rule groups')
+ if module.params.get('rule_strings'):
+ module.fail_json('rule_strings can only be used for stateful rule groups')
+ if module.params.get('rule_list'):
+ module.fail_json('rule_list can only be used for stateful rule groups')
+ if module.params.get('domain_list'):
+ module.fail_json('domain_list can only be used for stateful rule groups')
+
+ if module.params.get('rule_order'):
+ module.require_botocore_at_least('1.23.23', reason='to set the rule order')
+
+ manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type)
+ manager.set_wait(module.params.get('wait', None))
+ manager.set_wait_timeout(module.params.get('wait_timeout', None))
+
+ if state == 'absent':
+ manager.delete()
+ else:
+ manager.set_description(module.params.get('description'))
+ manager.set_capacity(module.params.get('capacity'))
+ manager.set_rule_order(module.params.get('rule_order'))
+ manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables'))
+ manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables'))
+ manager.set_rule_string(module.params.get('rule_strings'))
+ manager.set_domain_list(module.params.get('domain_list'))
+ manager.set_rule_list(module.params.get('rule_list'))
+ manager.set_tags(module.params.get('tags'), module.params.get('purge_tags'))
+
+ manager.flush_changes()
+
+ results = dict(
+ changed=manager.changed,
+ rule_group=manager.updated_resource,
+ )
+ if manager.changed:
+ diff = dict(
+ before=manager.original_resource,
+ after=manager.updated_resource,
+ )
+ results['diff'] = diff
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py
new file mode 100644
index 000000000..a9cec3778
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py
@@ -0,0 +1,449 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_rule_group_info
+short_description: describe AWS Network Firewall rule groups
+version_added: 4.0.0
+description:
+ - A module for describing AWS Network Firewall rule groups.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall rule group.
+ - At time of writing AWS does not support describing Managed Rules.
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall rule group.
+ required: false
+ type: str
+ rule_type:
+ description:
+ - Indicates whether the rule group is stateless or stateful.
+ - Required if I(name) is provided.
+ required: false
+    aliases: ['type']
+ choices: ['stateful', 'stateless']
+ type: str
+ scope:
+ description:
+ - The scope of the request.
+ - When I(scope='account') returns a description of all rule groups in the account.
+      - When I(scope='account'), returns a description of all rule groups in the account.
+      - When I(scope='managed'), returns a list of available managed rule group ARNs.
+ - I(scope='managed') requires botocore>=1.23.23.
+ required: false
+ choices: ['managed', 'account']
+ type: str
+
+author: Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+
+# Describe all Rule Groups in an account (excludes managed groups)
+- community.aws.networkfirewall_rule_group_info: {}
+
+# List the available Managed Rule groups (AWS doesn't support describing the
+# groups)
+- community.aws.networkfirewall_rule_group_info:
+ scope: managed
+
+# Describe a Rule Group by ARN
+- community.aws.networkfirewall_rule_group_info:
+ arn: arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleRuleGroup
+
+# Describe a Rule Group by name
+- community.aws.networkfirewall_rule_group_info:
+ name: ExampleRuleGroup
+ type: stateful
+
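+# (Illustrative sketch) Register the results; 'account_rule_groups' is an
+# illustrative variable name, and 'rule_list' is the documented return key.
+- community.aws.networkfirewall_rule_group_info:
+    scope: account
+  register: account_rule_groups
+
+- ansible.builtin.debug:
+    msg: "{{ account_rule_groups.rule_list }}"
+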
+'''
+
+RETURN = '''
+rule_list:
+ description: A list of ARNs of the matching rule groups.
+ type: list
+ elements: str
+ returned: When a rule name isn't specified
+
+rule_groups:
+ description: The details of the rule groups
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ rule_group:
+ description: Details of the rules in the rule group
+ type: dict
+ returned: success
+ contains:
+ rule_variables:
+ description: Settings that are available for use in the rules in the rule group.
+ returned: When rule variables are attached to the rule group.
+ type: complex
+ contains:
+ ip_sets:
+ description: A dictionary mapping variable names to IP addresses in CIDR format.
+ returned: success
+ type: dict
+ example: ['192.0.2.0/24']
+ port_sets:
+ description: A dictionary mapping variable names to ports
+ returned: success
+ type: dict
+ example: ['42']
+ stateful_rule_options:
+ description: Additional options governing how Network Firewall handles stateful rules.
+ returned: When the rule group is either "rules string" or "rules list" based.
+ type: dict
+ contains:
+ rule_order:
+ description: The order in which rules will be evaluated.
+ returned: success
+ type: str
+ example: 'DEFAULT_ACTION_ORDER'
+ rules_source:
+          description: Inspection criteria used for a 5-tuple based rule group.
+ returned: success
+ type: dict
+ contains:
+ stateful_rules:
+ description: A list of dictionaries describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules list" based.
+ type: list
+ elements: dict
+ contains:
+ action:
+ description: What action to perform when a flow matches the rule criteria.
+ returned: success
+ type: str
+ example: 'PASS'
+ header:
+ description: A description of the criteria used for the rule.
+ returned: success
+ type: dict
+ contains:
+ protocol:
+ description: The protocol to inspect for.
+ returned: success
+ type: str
+ example: 'IP'
+ source:
+ description: The source address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '203.0.113.98'
+ source_port:
+ description: The source port to inspect for.
+ returned: success
+ type: str
+ example: '42'
+ destination:
+ description: The destination address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '198.51.100.0/24'
+ destination_port:
+ description: The destination port to inspect for.
+ returned: success
+ type: str
+ example: '6666:6667'
+ direction:
+ description: The direction of traffic flow to inspect.
+ returned: success
+ type: str
+ example: 'FORWARD'
+ rule_options:
+ description: Additional Suricata RuleOptions settings for the rule.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ keyword:
+ description: The keyword for the setting.
+ returned: success
+ type: str
+ example: 'sid:1'
+ settings:
+ description: A list of values passed to the setting.
+ returned: When values are available
+ type: list
+ elements: str
+ rules_string:
+ description: A string describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules string" based.
+ type: str
+ rules_source_list:
+ description: A description of the criteria for a domain list rule group.
+ returned: When the rule group is "domain list" based.
+ type: dict
+ contains:
+ targets:
+ description: A list of domain names to be inspected for.
+ returned: success
+ type: list
+ elements: str
+ example: ['abc.example.com', '.example.net']
+ target_types:
+ description: The protocols to be inspected by the rule group.
+ returned: success
+ type: list
+ elements: str
+ example: ['TLS_SNI', 'HTTP_HOST']
+ generated_rules_type:
+ description: Whether the rule group allows or denies access to the domains in the list.
+ returned: success
+ type: str
+ example: 'ALLOWLIST'
+ stateless_rules_and_custom_actions:
+ description: A description of the criteria for a stateless rule group.
+ returned: When the rule group is a stateless rule group.
+ type: dict
+ contains:
+ stateless_rules:
+ description: A list of stateless rules for use in a stateless rule group.
+ type: list
+ elements: dict
+ contains:
+ rule_definition:
+ description: Describes the stateless 5-tuple inspection criteria and actions for the rule.
+ returned: success
+ type: dict
+ contains:
+ match_attributes:
+ description: Describes the stateless 5-tuple inspection criteria for the rule.
+ returned: success
+ type: dict
+ contains:
+ sources:
+ description: The source IP addresses and address ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ address_definition:
+ description: An IP address or a block of IP addresses in CIDR notation.
+ returned: success
+ type: str
+ example: '192.0.2.3'
+ destinations:
+ description: The destination IP addresses and address ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ address_definition:
+ description: An IP address or a block of IP addresses in CIDR notation.
+ returned: success
+ type: str
+ example: '192.0.2.3'
+ source_ports:
+ description: The source port ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: The lower limit of the port range.
+ returned: success
+ type: int
+ to_port:
+ description: The upper limit of the port range.
+ returned: success
+ type: int
+ destination_ports:
+ description: The destination port ranges to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: The lower limit of the port range.
+ returned: success
+ type: int
+ to_port:
+ description: The upper limit of the port range.
+ returned: success
+ type: int
+ protocols:
+ description: The IANA protocol numbers of the protocols to inspect for.
+ returned: success
+ type: list
+ elements: int
+ example: [6]
+ tcp_flags:
+ description: The TCP flags and masks to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ flags:
+ description: Used with masks to define the TCP flags that flows are inspected for.
+ returned: success
+ type: list
+ elements: str
+ masks:
+ description: The set of flags considered during inspection.
+ returned: success
+ type: list
+ elements: str
+ actions:
+ description: The actions to take when a flow matches the rule.
+ returned: success
+ type: list
+ elements: str
+ example: ['aws:pass', 'CustomActionName']
+ priority:
+ description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group.
+ returned: success
+ type: int
+ custom_actions:
+ description: A list of individual custom action definitions that are available for use in stateless rules.
+ type: list
+ elements: dict
+ contains:
+ action_name:
+ description: The name for the custom action.
+ returned: success
+ type: str
+ action_definition:
+ description: The custom action associated with the action name.
+ returned: success
+ type: dict
+ contains:
+ publish_metric_action:
+ description: The description of an action which publishes to CloudWatch.
+ returned: When the action publishes to CloudWatch.
+ type: dict
+ contains:
+ dimensions:
+ description: The value to use in an Amazon CloudWatch custom metric dimension.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The value to use in the custom metric dimension.
+ returned: success
+ type: str
+ rule_group_metadata:
+      description: Metadata about the rule group
+ type: dict
+ returned: success
+ contains:
+ capacity:
+ description: The maximum operating resources that this rule group can use.
+ type: int
+ returned: success
+ consumed_capacity:
+ description: The number of capacity units currently consumed by the rule group rules.
+ type: int
+ returned: success
+ description:
+ description: A description of the rule group.
+ type: str
+ returned: success
+ number_of_associations:
+ description: The number of firewall policies that use this rule group.
+ type: int
+ returned: success
+ rule_group_arn:
+ description: The ARN for the rule group
+ type: int
+          type: str
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup'
+ rule_group_id:
+ description: A unique identifier for the rule group.
+          type: str
+ returned: success
+ example: '12345678-abcd-1234-abcd-123456789abc'
+ rule_group_name:
+ description: The name of the rule group.
+ type: str
+ returned: success
+ rule_group_status:
+ description: The current status of a rule group.
+ type: str
+ returned: success
+ example: 'DELETING'
+ tags:
+ description: A dictionary representing the tags associated with the rule group.
+ type: dict
+ returned: success
+ type:
+ description: Whether the rule group is stateless or stateful.
+ type: str
+ returned: success
+ example: 'STATEFUL'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=False),
+ rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']),
+ arn=dict(type='str', required=False),
+ scope=dict(type='str', required=False, choices=['managed', 'account']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('arn', 'name',),
+ ('arn', 'rule_type'),
+ ],
+ required_together=[
+ ('name', 'rule_type'),
+ ]
+ )
+
+ module.require_botocore_at_least('1.19.20')
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+ rule_type = module.params.get('rule_type')
+ scope = module.params.get('scope')
+
+    if scope == 'managed':
+ module.require_botocore_at_least('1.23.23', reason='to list managed rules')
+
+ manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type)
+
+ results = dict(changed=False)
+
+ if name or arn:
+ rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn)
+ if rule:
+ results['rule_groups'] = [rule]
+ else:
+ results['rule_groups'] = []
+ else:
+ rule_list = manager.list(scope=scope)
+ results['rule_list'] = rule_list
+ if scope != 'managed':
+ rules = [manager.get_rule_group(arn=r) for r in rule_list]
+ results['rule_groups'] = rules
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/opensearch.py b/ansible_collections/community/aws/plugins/modules/opensearch.py
new file mode 100644
index 000000000..7ed8c0722
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/opensearch.py
@@ -0,0 +1,1501 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: opensearch
+short_description: Creates OpenSearch or ElasticSearch domain
+description:
+  - Creates or modifies an Amazon OpenSearch Service domain.
+version_added: 4.0.0
+author: "Sebastien Rosset (@sebastien-rosset)"
+options:
+ state:
+ description:
+      - C(present) creates or modifies an OpenSearch domain.
+      - C(absent) deletes the OpenSearch domain.
+ required: false
+ type: str
+ choices: ['present', 'absent']
+ default: present
+ domain_name:
+ description:
+ - The name of the Amazon OpenSearch/ElasticSearch Service domain.
+ - Domain names are unique across the domains owned by an account within an AWS region.
+ required: true
+ type: str
+ engine_version:
+ description:
+      - >
+        The engine version to use. For example, 'ElasticSearch_7.10' or 'OpenSearch_1.1'.
+      - >
+        If the currently running version is not equal to I(engine_version),
+        a cluster upgrade is triggered.
+      - >
+        It may not be possible to upgrade directly from the currently running version
+        to I(engine_version). In that case, the upgrade is performed incrementally by
+        upgrading to the highest compatible version, then repeating the operation until
+        the cluster is running at the target version.
+      - >
+        The upgrade operation fails if there is no path from the current version to I(engine_version).
+      - >
+        See OpenSearch documentation for upgrade compatibility.
+ required: false
+ type: str
+ allow_intermediate_upgrades:
+ description:
+ - >
+ If true, allow OpenSearch domain to be upgraded through one or more intermediate versions.
+ - >
+ If false, do not allow OpenSearch domain to be upgraded through intermediate versions.
+        The upgrade operation fails if it's not possible to upgrade to I(engine_version) directly.
+ required: false
+ type: bool
+ default: true
+ cluster_config:
+ description:
+ - Parameters for the cluster configuration of an OpenSearch Service domain.
+ type: dict
+ suboptions:
+ instance_type:
+ description:
+ - Type of the instances to use for the domain.
+ required: false
+ type: str
+ instance_count:
+ description:
+ - Number of instances for the domain.
+ required: false
+ type: int
+ zone_awareness:
+ description:
+ - A boolean value to indicate whether zone awareness is enabled.
+ required: false
+ type: bool
+ availability_zone_count:
+ description:
+ - >
+ An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
+            This should be equal to the number of subnets if VPC endpoints are enabled.
+ required: false
+ type: int
+ dedicated_master:
+ description:
+ - A boolean value to indicate whether a dedicated master node is enabled.
+ required: false
+ type: bool
+ dedicated_master_instance_type:
+ description:
+ - The instance type for a dedicated master node.
+ required: false
+ type: str
+ dedicated_master_instance_count:
+ description:
+ - Total number of dedicated master nodes, active and on standby, for the domain.
+ required: false
+ type: int
+ warm_enabled:
+ description:
+ - True to enable UltraWarm storage.
+ required: false
+ type: bool
+ warm_type:
+ description:
+ - The instance type for the OpenSearch domain's warm nodes.
+ required: false
+ type: str
+ warm_count:
+ description:
+ - The number of UltraWarm nodes in the domain.
+ required: false
+ type: int
+ cold_storage_options:
+ description:
+ - Specifies the ColdStorageOptions config for a Domain.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - True to enable cold storage. Supported on Elasticsearch 7.9 or above.
+ required: false
+ type: bool
+ ebs_options:
+ description:
+ - Parameters to configure EBS-based storage for an OpenSearch Service domain.
+ type: dict
+ suboptions:
+ ebs_enabled:
+ description:
+ - Specifies whether EBS-based storage is enabled.
+ required: false
+ type: bool
+ volume_type:
+ description:
+          - Specifies the volume type for EBS-based storage, for example C(standard), C(gp2) or C(io1).
+ required: false
+ type: str
+ volume_size:
+ description:
+ - Integer to specify the size of an EBS volume.
+ required: false
+ type: int
+ iops:
+ description:
+          - The IOPS for a Provisioned IOPS EBS volume (SSD).
+ required: false
+ type: int
+ vpc_options:
+ description:
+ - Options to specify the subnets and security groups for a VPC endpoint.
+ type: dict
+ suboptions:
+ subnets:
+ description:
+ - Specifies the subnet ids for VPC endpoint.
+ required: false
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - Specifies the security group ids for VPC endpoint.
+ required: false
+ type: list
+ elements: str
+ snapshot_options:
+ description:
+ - Option to set time, in UTC format, of the daily automated snapshot.
+ type: dict
+ suboptions:
+ automated_snapshot_start_hour:
+ description:
+ - >
+ Integer value from 0 to 23 specifying when the service takes a daily automated snapshot
+ of the specified Elasticsearch domain.
+ required: false
+ type: int
+ access_policies:
+ description:
+ - IAM access policy as a JSON-formatted string.
+ required: false
+ type: dict
+ encryption_at_rest_options:
+ description:
+ - Parameters to enable encryption at rest.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - Should data be encrypted while at rest.
+ required: false
+ type: bool
+ kms_key_id:
+ description:
+          - If encryption at rest is enabled, this identifies the encryption key to use.
+ - The value should be a KMS key ARN. It can also be the KMS key id.
+ required: false
+ type: str
+ node_to_node_encryption_options:
+ description:
+ - Node-to-node encryption options.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - True to enable node-to-node encryption.
+ required: false
+ type: bool
+ cognito_options:
+ description:
+ - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - The option to enable Cognito for OpenSearch Dashboards authentication.
+ required: false
+ type: bool
+ user_pool_id:
+ description:
+ - The Cognito user pool ID for OpenSearch Dashboards authentication.
+ required: false
+ type: str
+ identity_pool_id:
+ description:
+ - The Cognito identity pool ID for OpenSearch Dashboards authentication.
+ required: false
+ type: str
+ role_arn:
+ description:
+ - The role ARN that provides OpenSearch permissions for accessing Cognito resources.
+ required: false
+ type: str
+ domain_endpoint_options:
+ description:
+ - Options to specify configuration that will be applied to the domain endpoint.
+ type: dict
+ suboptions:
+ enforce_https:
+ description:
+ - Whether only HTTPS endpoint should be enabled for the domain.
+ type: bool
+ tls_security_policy:
+ description:
+ - Specify the TLS security policy to apply to the HTTPS endpoint of the domain.
+ type: str
+ custom_endpoint_enabled:
+ description:
+ - Whether to enable a custom endpoint for the domain.
+ type: bool
+ custom_endpoint:
+ description:
+ - The fully qualified domain for your custom endpoint.
+ type: str
+ custom_endpoint_certificate_arn:
+ description:
+ - The ACM certificate ARN for your custom endpoint.
+ type: str
+ advanced_security_options:
+ description:
+ - Specifies advanced security options.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - True if advanced security is enabled.
+ - You must enable node-to-node encryption to use advanced security options.
+ type: bool
+ internal_user_database_enabled:
+ description:
+ - True if the internal user database is enabled.
+ type: bool
+ master_user_options:
+ description:
+ - Credentials for the master user, username and password, ARN, or both.
+ type: dict
+ suboptions:
+ master_user_arn:
+ description:
+ - ARN for the master user (if IAM is enabled).
+ type: str
+ master_user_name:
+ description:
+ - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ master_user_password:
+ description:
+ - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ saml_options:
+ description:
+ - The SAML application configuration for the domain.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - True if SAML is enabled.
+ - To use SAML authentication, you must enable fine-grained access control.
+ - You can only enable SAML authentication for OpenSearch Dashboards on existing domains,
+ not during the creation of new ones.
+ - Domains only support one Dashboards authentication method at a time.
+ If you have Amazon Cognito authentication for OpenSearch Dashboards enabled,
+ you must disable it before you can enable SAML.
+ type: bool
+ idp:
+ description:
+ - The SAML Identity Provider's information.
+ type: dict
+ suboptions:
+ metadata_content:
+ description:
+ - The metadata of the SAML application in XML format.
+ type: str
+ entity_id:
+ description:
+ - The unique entity ID of the application in SAML identity provider.
+ type: str
+ master_user_name:
+ description:
+ - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ master_backend_role:
+ description:
+ - The backend role that the SAML master user is mapped to.
+ type: str
+ subject_key:
+ description:
+ - Element of the SAML assertion to use for username. Default is NameID.
+ type: str
+ roles_key:
+ description:
+ - Element of the SAML assertion to use for backend roles. Default is roles.
+ type: str
+ session_timeout_minutes:
+ description:
+ - The duration, in minutes, after which a user session becomes inactive. Acceptable values are between 1 and 1440, and the default value is 60.
+ type: int
+ auto_tune_options:
+ description:
+ - Specifies Auto-Tune options.
+ type: dict
+ suboptions:
+ desired_state:
+ description:
+ - The Auto-Tune desired state. Valid values are ENABLED and DISABLED.
+ type: str
+ choices: ['ENABLED', 'DISABLED']
+ maintenance_schedules:
+ description:
+ - A list of maintenance schedules.
+ type: list
+ elements: dict
+ suboptions:
+ start_at:
+ description:
+ - The timestamp at which the Auto-Tune maintenance schedule starts.
+ type: str
+ duration:
+ description:
+ - Specifies maintenance schedule duration, duration value and duration unit.
+ type: dict
+ suboptions:
+ value:
+ description:
+ - Integer to specify the value of a maintenance schedule duration.
+ type: int
+ unit:
+ description:
+ - The unit of a maintenance schedule duration. Valid value is HOURS.
+ choices: ['HOURS']
+ type: str
+ cron_expression_for_recurrence:
+ description:
+ - A cron expression for a recurring maintenance schedule.
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for completion of OpenSearch creation, modification or deletion.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+requirements:
+ - botocore >= 1.21.38
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+"""
+
+EXAMPLES = """
+
+- name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters
+ community.aws.opensearch:
+ domain_name: "dev-cluster"
+ engine_version: Elasticsearch_7.10
+ cluster_config:
+ instance_type: "t2.small.search"
+ instance_count: 2
+ zone_awareness: false
+ dedicated_master: false
+ ebs_options:
+ ebs_enabled: true
+ volume_type: "gp2"
+ volume_size: 10
+ access_policies: "{{ lookup('file', 'policy.json') | from_json }}"
+
+- name: Create OpenSearch domain with dedicated masters
+ community.aws.opensearch:
+ domain_name: "my-domain"
+ engine_version: OpenSearch_1.1
+ cluster_config:
+ instance_type: "t2.small.search"
+ instance_count: 12
+ dedicated_master: true
+ zone_awareness: true
+ availability_zone_count: 2
+ dedicated_master_instance_type: "t2.small.search"
+ dedicated_master_instance_count: 3
+ warm_enabled: true
+ warm_type: "ultrawarm1.medium.search"
+ warm_count: 1
+ cold_storage_options:
+ enabled: false
+ ebs_options:
+ ebs_enabled: true
+ volume_type: "io1"
+ volume_size: 10
+ iops: 1000
+ vpc_options:
+ subnets:
+ - "subnet-e537d64a"
+ - "subnet-e537d64b"
+ security_groups:
+ - "sg-dd2f13cb"
+ - "sg-dd2f13cc"
+ snapshot_options:
+ automated_snapshot_start_hour: 13
+ access_policies: "{{ lookup('file', 'policy.json') | from_json }}"
+ encryption_at_rest_options:
+ enabled: false
+ node_to_node_encryption_options:
+ enabled: false
+ auto_tune_options:
+ desired_state: "ENABLED"
+ maintenance_schedules:
+ - start_at: "2025-01-12"
+ duration:
+ value: 1
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ - start_at: "2032-01-12"
+ duration:
+ value: 2
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ tags:
+ Environment: Development
+ Application: Search
+ wait: true
+
+- name: Increase size of EBS volumes for existing cluster
+ community.aws.opensearch:
+ domain_name: "my-domain"
+ ebs_options:
+ volume_size: 20
+ wait: true
+
+- name: Increase instance count for existing cluster
+ community.aws.opensearch:
+ domain_name: "my-domain"
+ cluster_config:
+ instance_count: 40
+ wait: true
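+
+# The next two tasks are illustrative sketches: the domain name is assumed, and
+# the target engine version must be one that AWS can reach from the domain's
+# current version (allow_intermediate_upgrades permits stepping through
+# intermediate major versions when a direct upgrade is not possible).
+- name: Upgrade an existing cluster to OpenSearch 1.1
+ community.aws.opensearch:
+ domain_name: "my-domain"
+ engine_version: OpenSearch_1.1
+ allow_intermediate_upgrades: true
+ wait: true
+
+- name: Delete an OpenSearch domain
+ community.aws.opensearch:
+ domain_name: "my-domain"
+ state: absent
+ wait: true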
+
+"""
+
+from copy import deepcopy
+import datetime
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.six import string_types
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import (
+ AnsibleAWSModule,
+ is_boto3_error_code,
+)
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ compare_policies,
+)
+from ansible_collections.community.aws.plugins.module_utils.opensearch import (
+ compare_domain_versions,
+ ensure_tags,
+ get_domain_status,
+ get_domain_config,
+ get_target_increment_version,
+ normalize_opensearch,
+ parse_version,
+ wait_for_domain_status,
+)
+
+
+def ensure_domain_absent(client, module):
+ domain_name = module.params.get("domain_name")
+ changed = False
+
+ domain = get_domain_status(client, module, domain_name)
+ if module.check_mode:
+ # Report a change only if the domain actually exists.
+ module.exit_json(
+ changed=bool(domain), msg="Would have deleted domain if not in check mode"
+ )
+ try:
+ client.delete_domain(DomainName=domain_name)
+ changed = True
+ except is_boto3_error_code("ResourceNotFoundException"):
+ # The resource does not exist, or it has already been deleted
+ return dict(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="trying to delete domain")
+
+ # If we're not waiting for a delete to complete then we're all done
+ # so just return
+ if not domain or not module.params.get("wait"):
+ return dict(changed=changed)
+ try:
+ wait_for_domain_status(client, module, domain_name, "domain_deleted")
+ return dict(changed=changed)
+ except is_boto3_error_code("ResourceNotFoundException"):
+ return dict(changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "awaiting domain deletion")
+
+
+def upgrade_domain(client, module, source_version, target_engine_version):
+ domain_name = module.params.get("domain_name")
+ # Determine if it's possible to upgrade directly from source version
+ # to target version, or if it's necessary to upgrade through intermediate major versions.
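+ # For example, a hypothetical domain on Elasticsearch_5.6 may have to step
+ # through Elasticsearch_6.8 before reaching OpenSearch_1.1; the exact path is
+ # reported by the get_compatible_versions() API and varies per domain.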
+ next_version = target_engine_version
+ # When perform_check_only is true, an upgrade eligibility check is performed,
+ # but the upgrade itself is not executed.
+ perform_check_only = False
+ if module.check_mode:
+ perform_check_only = True
+ current_version = source_version
+ while current_version != target_engine_version:
+ v = get_target_increment_version(client, module, domain_name, target_engine_version)
+ if v is None:
+ # There is no compatible version, according to the get_compatible_versions() API.
+ # The upgrade should fail, but try anyway.
+ next_version = target_engine_version
+ else:
+ next_version = v
+ if next_version != target_engine_version:
+ # It's not possible to upgrade directly to the target version.
+ # Check the module parameters to determine if this is allowed or not.
+ if not module.params.get("allow_intermediate_upgrades"):
+ module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format(
+ source_version, target_engine_version, next_version))
+
+ parameters = {
+ "DomainName": domain_name,
+ "TargetVersion": next_version,
+ "PerformCheckOnly": perform_check_only,
+ }
+
+ if not module.check_mode:
+ # If background tasks are in progress, wait until they complete.
+ # This can take several hours depending on the cluster size and the type of background tasks
+ # (maybe an upgrade is already in progress).
+ # It's not possible to upgrade a domain that has background tasks in progress,
+ # the call to client.upgrade_domain would fail.
+ wait_for_domain_status(client, module, domain_name, "domain_available")
+
+ try:
+ client.upgrade_domain(**parameters)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ # In check mode (=> PerformCheckOnly==True), a ValidationException may be
+ # raised if it's not possible to upgrade to the target version.
+ module.fail_json_aws(
+ e,
+ msg="Couldn't upgrade domain {0} from {1} to {2}".format(
+ domain_name, current_version, next_version
+ ),
+ )
+
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ msg="Would have upgraded domain from {0} to {1} if not in check mode".format(
+ current_version, next_version
+ ),
+ )
+ current_version = next_version
+
+ if module.params.get("wait"):
+ wait_for_domain_status(client, module, domain_name, "domain_available")
+
+
+def set_cluster_config(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+
+ cluster_config = desired_domain_config["ClusterConfig"]
+ cluster_opts = module.params.get("cluster_config")
+ if cluster_opts is not None:
+ if cluster_opts.get("instance_type") is not None:
+ cluster_config["InstanceType"] = cluster_opts.get("instance_type")
+ if cluster_opts.get("instance_count") is not None:
+ cluster_config["InstanceCount"] = cluster_opts.get("instance_count")
+ if cluster_opts.get("zone_awareness") is not None:
+ cluster_config["ZoneAwarenessEnabled"] = cluster_opts.get("zone_awareness")
+ if cluster_config["ZoneAwarenessEnabled"]:
+ if cluster_opts.get("availability_zone_count") is not None:
+ cluster_config["ZoneAwarenessConfig"] = {
+ "AvailabilityZoneCount": cluster_opts.get(
+ "availability_zone_count"
+ ),
+ }
+
+ if cluster_opts.get("dedicated_master") is not None:
+ cluster_config["DedicatedMasterEnabled"] = cluster_opts.get(
+ "dedicated_master"
+ )
+ if cluster_config["DedicatedMasterEnabled"]:
+ if cluster_opts.get("dedicated_master_instance_type") is not None:
+ cluster_config["DedicatedMasterType"] = cluster_opts.get(
+ "dedicated_master_instance_type"
+ )
+ if cluster_opts.get("dedicated_master_instance_count") is not None:
+ cluster_config["DedicatedMasterCount"] = cluster_opts.get(
+ "dedicated_master_instance_count"
+ )
+
+ if cluster_opts.get("warm_enabled") is not None:
+ cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled")
+ if cluster_config["WarmEnabled"]:
+ if cluster_opts.get("warm_type") is not None:
+ cluster_config["WarmType"] = cluster_opts.get("warm_type")
+ if cluster_opts.get("warm_count") is not None:
+ cluster_config["WarmCount"] = cluster_opts.get("warm_count")
+
+ cold_storage_opts = None
+ if cluster_opts is not None:
+ cold_storage_opts = cluster_opts.get("cold_storage_options")
+ if compare_domain_versions(desired_domain_config["EngineVersion"], "Elasticsearch_7.9") < 0:
+ # If the engine version is ElasticSearch < 7.9, cold storage is not supported.
+ # When querying a domain < 7.9, the AWS API indicates cold storage is disabled (Enabled: False),
+ # which makes sense. However, trying to do HTTP POST with Enable: False causes an API error.
+ # The 'ColdStorageOptions' attribute should not be present in HTTP POST.
+ if cold_storage_opts is not None and cold_storage_opts.get("enabled"):
+ module.fail_json(msg="Cold Storage is not supported")
+ cluster_config.pop("ColdStorageOptions", None)
+ if (
+ current_domain_config is not None
+ and "ClusterConfig" in current_domain_config
+ ):
+ # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff
+ # will indicate a change must be done.
+ current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None)
+ else:
+ # Elasticsearch 7.9 and above support ColdStorageOptions.
+ if (
+ cold_storage_opts is not None
+ and cold_storage_opts.get("enabled") is not None
+ ):
+ cluster_config["ColdStorageOptions"] = {
+ "Enabled": cold_storage_opts.get("enabled"),
+ }
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["ClusterConfig"] != cluster_config
+ ):
+ change_set.append(
+ "ClusterConfig changed from {0} to {1}".format(
+ current_domain_config["ClusterConfig"], cluster_config
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_ebs_options(module, current_domain_config, desired_domain_config, change_set):
+ changed = False
+ ebs_config = desired_domain_config["EBSOptions"]
+ ebs_opts = module.params.get("ebs_options")
+ if ebs_opts is None:
+ return changed
+ if ebs_opts.get("ebs_enabled") is not None:
+ ebs_config["EBSEnabled"] = ebs_opts.get("ebs_enabled")
+
+ if not ebs_config["EBSEnabled"]:
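+ # Drop the other EBS attributes (volume type, size, IOPS) so that stale
+ # values are not sent to the API when EBS is disabled.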
+ desired_domain_config["EBSOptions"] = {
+ "EBSEnabled": False,
+ }
+ else:
+ if ebs_opts.get("volume_type") is not None:
+ ebs_config["VolumeType"] = ebs_opts.get("volume_type")
+ if ebs_opts.get("volume_size") is not None:
+ ebs_config["VolumeSize"] = ebs_opts.get("volume_size")
+ if ebs_opts.get("iops") is not None:
+ ebs_config["Iops"] = ebs_opts.get("iops")
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["EBSOptions"] != ebs_config
+ ):
+ change_set.append(
+ "EBSOptions changed from {0} to {1}".format(
+ current_domain_config["EBSOptions"], ebs_config
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_encryption_at_rest_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"]
+ encryption_at_rest_opts = module.params.get("encryption_at_rest_options")
+ if encryption_at_rest_opts is None:
+ return False
+ if encryption_at_rest_opts.get("enabled") is not None:
+ encryption_at_rest_config["Enabled"] = encryption_at_rest_opts.get("enabled")
+ if not encryption_at_rest_config["Enabled"]:
+ desired_domain_config["EncryptionAtRestOptions"] = {
+ "Enabled": False,
+ }
+ else:
+ if encryption_at_rest_opts.get("kms_key_id") is not None:
+ encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get(
+ "kms_key_id"
+ )
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["EncryptionAtRestOptions"]
+ != encryption_at_rest_config
+ ):
+ change_set.append(
+ "EncryptionAtRestOptions changed from {0} to {1}".format(
+ current_domain_config["EncryptionAtRestOptions"],
+ encryption_at_rest_config,
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_node_to_node_encryption_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ node_to_node_encryption_config = desired_domain_config[
+ "NodeToNodeEncryptionOptions"
+ ]
+ node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options")
+ if node_to_node_encryption_opts is None:
+ return changed
+ if node_to_node_encryption_opts.get("enabled") is not None:
+ node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get(
+ "enabled"
+ )
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["NodeToNodeEncryptionOptions"]
+ != node_to_node_encryption_config
+ ):
+ change_set.append(
+ "NodeToNodeEncryptionOptions changed from {0} to {1}".format(
+ current_domain_config["NodeToNodeEncryptionOptions"],
+ node_to_node_encryption_config,
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_vpc_options(module, current_domain_config, desired_domain_config, change_set):
+ changed = False
+ vpc_config = None
+ if "VPCOptions" in desired_domain_config:
+ vpc_config = desired_domain_config["VPCOptions"]
+ vpc_opts = module.params.get("vpc_options")
+ if vpc_opts is None:
+ return changed
+ vpc_subnets = vpc_opts.get("subnets")
+ if vpc_subnets is not None:
+ if vpc_config is None:
+ vpc_config = {}
+ desired_domain_config["VPCOptions"] = vpc_config
+ # OpenSearch cluster is attached to VPC
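+ # The subnets may be provided either as a list or as a comma-separated
+ # string such as "subnet-a,subnet-b"; normalize to a list.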
+ if isinstance(vpc_subnets, string_types):
+ vpc_subnets = [x.strip() for x in vpc_subnets.split(",")]
+ vpc_config["SubnetIds"] = vpc_subnets
+
+ vpc_security_groups = vpc_opts.get("security_groups")
+ if vpc_security_groups is not None:
+ if vpc_config is None:
+ vpc_config = {}
+ desired_domain_config["VPCOptions"] = vpc_config
+ if isinstance(vpc_security_groups, string_types):
+ vpc_security_groups = [x.strip() for x in vpc_security_groups.split(",")]
+ vpc_config["SecurityGroupIds"] = vpc_security_groups
+
+ if current_domain_config is not None:
+ # Modify existing cluster.
+ current_cluster_is_vpc = False
+ desired_cluster_is_vpc = False
+ if (
+ "VPCOptions" in current_domain_config
+ and "SubnetIds" in current_domain_config["VPCOptions"]
+ and len(current_domain_config["VPCOptions"]["SubnetIds"]) > 0
+ ):
+ current_cluster_is_vpc = True
+ if (
+ "VPCOptions" in desired_domain_config
+ and "SubnetIds" in desired_domain_config["VPCOptions"]
+ and len(desired_domain_config["VPCOptions"]["SubnetIds"]) > 0
+ ):
+ desired_cluster_is_vpc = True
+ if current_cluster_is_vpc != desired_cluster_is_vpc:
+ # AWS does not allow changing the type. Don't fail here so we return the AWS API error.
+ change_set.append("VPCOptions changed between Internet and VPC")
+ changed = True
+ elif desired_cluster_is_vpc is False:
+ # There are no VPCOptions to configure.
+ pass
+ else:
+ # Note the subnets may be the same but be listed in a different order.
+ if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(
+ vpc_config["SubnetIds"]
+ ):
+ change_set.append(
+ "SubnetIds changed from {0} to {1}".format(
+ current_domain_config["VPCOptions"]["SubnetIds"],
+ vpc_config["SubnetIds"],
+ )
+ )
+ changed = True
+ if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(
+ vpc_config["SecurityGroupIds"]
+ ):
+ change_set.append(
+ "SecurityGroup changed from {0} to {1}".format(
+ current_domain_config["VPCOptions"]["SecurityGroupIds"],
+ vpc_config["SecurityGroupIds"],
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_snapshot_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ snapshot_config = desired_domain_config["SnapshotOptions"]
+ snapshot_opts = module.params.get("snapshot_options")
+ if snapshot_opts is None:
+ return changed
+ if snapshot_opts.get("automated_snapshot_start_hour") is not None:
+ snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get(
+ "automated_snapshot_start_hour"
+ )
+ if (
+ current_domain_config is not None
+ and current_domain_config["SnapshotOptions"] != snapshot_config
+ ):
+ change_set.append("SnapshotOptions changed")
+ changed = True
+ return changed
+
+
+def set_cognito_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ cognito_config = desired_domain_config["CognitoOptions"]
+ cognito_opts = module.params.get("cognito_options")
+ if cognito_opts is None:
+ return changed
+ if cognito_opts.get("enabled") is not None:
+ cognito_config["Enabled"] = cognito_opts.get("enabled")
+ if not cognito_config["Enabled"]:
+ desired_domain_config["CognitoOptions"] = {
+ "Enabled": False,
+ }
+ else:
+ if cognito_opts.get("cognito_user_pool_id") is not None:
+ cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id")
+ if cognito_opts.get("cognito_identity_pool_id") is not None:
+ cognito_config["IdentityPoolId"] = cognito_opts.get(
+ "cognito_identity_pool_id"
+ )
+ if cognito_opts.get("cognito_role_arn") is not None:
+ cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn")
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["CognitoOptions"] != cognito_config
+ ):
+ change_set.append(
+ "CognitoOptions changed from {0} to {1}".format(
+ current_domain_config["CognitoOptions"], cognito_config
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_advanced_security_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ advanced_security_config = desired_domain_config["AdvancedSecurityOptions"]
+ advanced_security_opts = module.params.get("advanced_security_options")
+ if advanced_security_opts is None:
+ return changed
+ if advanced_security_opts.get("enabled") is not None:
+ advanced_security_config["Enabled"] = advanced_security_opts.get("enabled")
+ if not advanced_security_config["Enabled"]:
+ desired_domain_config["AdvancedSecurityOptions"] = {
+ "Enabled": False,
+ }
+ else:
+ if advanced_security_opts.get("internal_user_database_enabled") is not None:
+ advanced_security_config[
+ "InternalUserDatabaseEnabled"
+ ] = advanced_security_opts.get("internal_user_database_enabled")
+ master_user_opts = advanced_security_opts.get("master_user_options")
+ if master_user_opts is not None:
+ advanced_security_config.setdefault("MasterUserOptions", {})
+ if master_user_opts.get("master_user_arn") is not None:
+ advanced_security_config["MasterUserOptions"][
+ "MasterUserARN"
+ ] = master_user_opts.get("master_user_arn")
+ if master_user_opts.get("master_user_name") is not None:
+ advanced_security_config["MasterUserOptions"][
+ "MasterUserName"
+ ] = master_user_opts.get("master_user_name")
+ if master_user_opts.get("master_user_password") is not None:
+ advanced_security_config["MasterUserOptions"][
+ "MasterUserPassword"
+ ] = master_user_opts.get("master_user_password")
+ saml_opts = advanced_security_opts.get("saml_options")
+ if saml_opts is not None:
+ # "SamlOptions" may be absent from the desired config, e.g. when creating a new domain.
+ advanced_security_config.setdefault("SamlOptions", {})
+ if saml_opts.get("enabled") is not None:
+ advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get(
+ "enabled"
+ )
+ idp_opts = saml_opts.get("idp")
+ if idp_opts is not None:
+ advanced_security_config["SamlOptions"].setdefault("Idp", {})
+ if idp_opts.get("metadata_content") is not None:
+ advanced_security_config["SamlOptions"]["Idp"][
+ "MetadataContent"
+ ] = idp_opts.get("metadata_content")
+ if idp_opts.get("entity_id") is not None:
+ advanced_security_config["SamlOptions"]["Idp"][
+ "EntityId"
+ ] = idp_opts.get("entity_id")
+ if saml_opts.get("master_user_name") is not None:
+ advanced_security_config["SamlOptions"][
+ "MasterUserName"
+ ] = saml_opts.get("master_user_name")
+ if saml_opts.get("master_backend_role") is not None:
+ advanced_security_config["SamlOptions"][
+ "MasterBackendRole"
+ ] = saml_opts.get("master_backend_role")
+ if saml_opts.get("subject_key") is not None:
+ advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get(
+ "subject_key"
+ )
+ if saml_opts.get("roles_key") is not None:
+ advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get(
+ "roles_key"
+ )
+ if saml_opts.get("session_timeout_minutes") is not None:
+ advanced_security_config["SamlOptions"][
+ "SessionTimeoutMinutes"
+ ] = saml_opts.get("session_timeout_minutes")
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config
+ ):
+ change_set.append(
+ "AdvancedSecurityOptions changed from {0} to {1}".format(
+ current_domain_config["AdvancedSecurityOptions"],
+ advanced_security_config,
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_domain_endpoint_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ domain_endpoint_config = desired_domain_config["DomainEndpointOptions"]
+ domain_endpoint_opts = module.params.get("domain_endpoint_options")
+ if domain_endpoint_opts is None:
+ return changed
+ if domain_endpoint_opts.get("enforce_https") is not None:
+ domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get(
+ "enforce_https"
+ )
+ if domain_endpoint_opts.get("tls_security_policy") is not None:
+ domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get(
+ "tls_security_policy"
+ )
+ if domain_endpoint_opts.get("custom_endpoint_enabled") is not None:
+ domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get(
+ "custom_endpoint_enabled"
+ )
+ if domain_endpoint_config["CustomEndpointEnabled"]:
+ if domain_endpoint_opts.get("custom_endpoint") is not None:
+ domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get(
+ "custom_endpoint"
+ )
+ if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None:
+ domain_endpoint_config[
+ "CustomEndpointCertificateArn"
+ ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn")
+
+ if (
+ current_domain_config is not None
+ and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config
+ ):
+ change_set.append(
+ "DomainEndpointOptions changed from {0} to {1}".format(
+ current_domain_config["DomainEndpointOptions"], domain_endpoint_config
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_auto_tune_options(
+ module, current_domain_config, desired_domain_config, change_set
+):
+ changed = False
+ auto_tune_config = desired_domain_config["AutoTuneOptions"]
+ auto_tune_opts = module.params.get("auto_tune_options")
+ if auto_tune_opts is None:
+ return changed
+ schedules = auto_tune_opts.get("maintenance_schedules")
+ if auto_tune_opts.get("desired_state") is not None:
+ auto_tune_config["DesiredState"] = auto_tune_opts.get("desired_state")
+ if auto_tune_config["DesiredState"] != "ENABLED":
+ desired_domain_config["AutoTuneOptions"] = {
+ "DesiredState": "DISABLED",
+ }
+ elif schedules is not None:
+ auto_tune_config["MaintenanceSchedules"] = []
+ for s in schedules:
+ schedule_entry = {}
+ start_at = s.get("start_at")
+ if start_at is not None:
+ if isinstance(start_at, datetime.datetime):
+ # The property was parsed from yaml to datetime, but the AWS API wants a string
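+ # e.g. datetime.datetime(2025, 1, 12, 0, 0) becomes "2025-01-12".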
+ start_at = start_at.strftime("%Y-%m-%d")
+ schedule_entry["StartAt"] = start_at
+ duration_opt = s.get("duration")
+ if duration_opt is not None:
+ schedule_entry["Duration"] = {}
+ if duration_opt.get("value") is not None:
+ schedule_entry["Duration"]["Value"] = duration_opt.get("value")
+ if duration_opt.get("unit") is not None:
+ schedule_entry["Duration"]["Unit"] = duration_opt.get("unit")
+ if s.get("cron_expression_for_recurrence") is not None:
+ schedule_entry["CronExpressionForRecurrence"] = s.get(
+ "cron_expression_for_recurrence"
+ )
+ auto_tune_config["MaintenanceSchedules"].append(schedule_entry)
+ if current_domain_config is not None:
+ if (
+ current_domain_config["AutoTuneOptions"]["DesiredState"]
+ != auto_tune_config["DesiredState"]
+ ):
+ change_set.append(
+ "AutoTuneOptions.DesiredState changed from {0} to {1}".format(
+ current_domain_config["AutoTuneOptions"]["DesiredState"],
+ auto_tune_config["DesiredState"],
+ )
+ )
+ changed = True
+ if (
+ auto_tune_config["MaintenanceSchedules"]
+ != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]
+ ):
+ change_set.append(
+ "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format(
+ current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"],
+ auto_tune_config["MaintenanceSchedules"],
+ )
+ )
+ changed = True
+ return changed
+
+
+def set_access_policy(module, current_domain_config, desired_domain_config, change_set):
+ access_policy_config = None
+ changed = False
+ access_policy_opt = module.params.get("access_policies")
+ if access_policy_opt is None:
+ return changed
+ try:
+ access_policy_config = json.dumps(access_policy_opt)
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to convert the policy into valid JSON: %s" % str(e)
+ )
+ if current_domain_config is not None:
+ # Updating existing domain
+ current_access_policy = json.loads(current_domain_config["AccessPolicies"])
+ # compare_policies() returns True when the two policies differ.
+ if compare_policies(current_access_policy, access_policy_opt):
+ change_set.append(
+ "AccessPolicy changed from {0} to {1}".format(
+ current_access_policy, access_policy_opt
+ )
+ )
+ changed = True
+ desired_domain_config["AccessPolicies"] = access_policy_config
+ else:
+ # Creating new domain
+ desired_domain_config["AccessPolicies"] = access_policy_config
+ return changed
+
+
+def ensure_domain_present(client, module):
+ domain_name = module.params.get("domain_name")
+
+ # Start from a default configuration, used when the OpenSearch domain does not exist.
+ # If the domain already exists, this is replaced below by the current
+ # configuration retrieved from the API.
+ desired_domain_config = {
+ "DomainName": module.params.get("domain_name"),
+ "EngineVersion": "OpenSearch_1.1",
+ "ClusterConfig": {
+ "InstanceType": "t2.small.search",
+ "InstanceCount": 2,
+ "ZoneAwarenessEnabled": False,
+ "DedicatedMasterEnabled": False,
+ "WarmEnabled": False,
+ },
+ # By default create ES attached to the Internet.
+ # If the "VPCOptions" property is specified, even if empty, the API server interprets
+ # as incomplete VPC configuration.
+ # "VPCOptions": {},
+ "EBSOptions": {
+ "EBSEnabled": False,
+ },
+ "EncryptionAtRestOptions": {
+ "Enabled": False,
+ },
+ "NodeToNodeEncryptionOptions": {
+ "Enabled": False,
+ },
+ "SnapshotOptions": {
+ "AutomatedSnapshotStartHour": 0,
+ },
+ "CognitoOptions": {
+ "Enabled": False,
+ },
+ "AdvancedSecurityOptions": {
+ "Enabled": False,
+ },
+ "DomainEndpointOptions": {
+ "CustomEndpointEnabled": False,
+ },
+ "AutoTuneOptions": {
+ "DesiredState": "DISABLED",
+ },
+ }
+ # Determine if OpenSearch domain already exists.
+ # current_domain_config may be None if the domain does not exist.
+ (current_domain_config, domain_arn) = get_domain_config(client, module, domain_name)
+ if current_domain_config is not None:
+ desired_domain_config = deepcopy(current_domain_config)
+
+ if module.params.get("engine_version") is not None:
+ # Validate the engine_version
+ v = parse_version(module.params.get("engine_version"))
+ if v is None:
+ module.fail_json(
+ "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y"
+ )
+ desired_domain_config["EngineVersion"] = module.params.get("engine_version")
+
+ changed = False
+ change_set = [] # For check mode purpose
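+ # Each set_*() helper below overlays the module parameters onto
+ # desired_domain_config and records a human-readable diff entry in
+ # change_set whenever the result differs from the current configuration.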
+
+ changed |= set_cluster_config(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_ebs_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_encryption_at_rest_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_node_to_node_encryption_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_vpc_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_snapshot_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_cognito_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_advanced_security_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_domain_endpoint_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_auto_tune_options(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+ changed |= set_access_policy(
+ module, current_domain_config, desired_domain_config, change_set
+ )
+
+ if current_domain_config is not None:
+ if (
+ desired_domain_config["EngineVersion"]
+ != current_domain_config["EngineVersion"]
+ ):
+ changed = True
+ change_set.append("EngineVersion changed")
+ upgrade_domain(
+ client,
+ module,
+ current_domain_config["EngineVersion"],
+ desired_domain_config["EngineVersion"],
+ )
+
+ if changed:
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ msg=f"Would have updated domain if not in check mode: {change_set}",
+ )
+ # Remove the "EngineVersion" attribute, the AWS API does not accept this attribute.
+ desired_domain_config.pop("EngineVersion", None)
+ try:
+ client.update_domain_config(**desired_domain_config)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't update domain {0}".format(domain_name)
+ )
+
+ else:
+ # Create new OpenSearch cluster
+ if module.params.get("access_policies") is None:
+ module.fail_json(
+ "state is present but the following is missing: access_policies"
+ )
+
+ changed = True
+ if module.check_mode:
+ module.exit_json(
+ changed=True, msg="Would have created a domain if not in check mode"
+ )
+ try:
+ response = client.create_domain(**desired_domain_config)
+ domain = response["DomainStatus"]
+ domain_arn = domain["ARN"]
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
+ module.fail_json_aws(
+ e, msg="Couldn't update domain {0}".format(domain_name)
+ )
+
+ try:
+ existing_tags = boto3_tag_list_to_ansible_dict(
+ client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name)
+
+ desired_tags = module.params["tags"]
+ purge_tags = module.params["purge_tags"]
+ changed |= ensure_tags(
+ client, module, domain_arn, existing_tags, desired_tags, purge_tags
+ )
+
+ if module.params.get("wait") and not module.check_mode:
+ wait_for_domain_status(client, module, domain_name, "domain_available")
+
+ domain = get_domain_status(client, module, domain_name)
+
+ return dict(changed=changed, **normalize_opensearch(client, module, domain))
+
+
+def main():
+
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ state=dict(choices=["present", "absent"], default="present"),
+ domain_name=dict(required=True),
+ engine_version=dict(),
+ allow_intermediate_upgrades=dict(required=False, type="bool", default=True),
+ access_policies=dict(required=False, type="dict"),
+ cluster_config=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ instance_type=dict(),
+ instance_count=dict(required=False, type="int"),
+ zone_awareness=dict(required=False, type="bool"),
+ availability_zone_count=dict(required=False, type="int"),
+ dedicated_master=dict(required=False, type="bool"),
+ dedicated_master_instance_type=dict(),
+ dedicated_master_instance_count=dict(type="int"),
+ warm_enabled=dict(required=False, type="bool"),
+ warm_type=dict(required=False),
+ warm_count=dict(required=False, type="int"),
+ cold_storage_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(required=False, type="bool"),
+ ),
+ ),
+ ),
+ ),
+ snapshot_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ automated_snapshot_start_hour=dict(required=False, type="int"),
+ ),
+ ),
+ ebs_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ ebs_enabled=dict(required=False, type="bool"),
+ volume_type=dict(required=False),
+ volume_size=dict(required=False, type="int"),
+ iops=dict(required=False, type="int"),
+ ),
+ ),
+ vpc_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ subnets=dict(type="list", elements="str", required=False),
+ security_groups=dict(type="list", elements="str", required=False),
+ ),
+ ),
+ cognito_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(required=False, type="bool"),
+ user_pool_id=dict(required=False),
+ identity_pool_id=dict(required=False),
+ role_arn=dict(required=False, no_log=False),
+ ),
+ ),
+ encryption_at_rest_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(type="bool"),
+ kms_key_id=dict(required=False),
+ ),
+ ),
+ node_to_node_encryption_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(type="bool"),
+ ),
+ ),
+ domain_endpoint_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enforce_https=dict(type="bool"),
+ tls_security_policy=dict(),
+ custom_endpoint_enabled=dict(type="bool"),
+ custom_endpoint=dict(),
+ custom_endpoint_certificate_arn=dict(),
+ ),
+ ),
+ advanced_security_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(type="bool"),
+ internal_user_database_enabled=dict(type="bool"),
+ master_user_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ master_user_arn=dict(),
+ master_user_name=dict(),
+ master_user_password=dict(no_log=True),
+ ),
+ ),
+ saml_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ enabled=dict(type="bool"),
+ idp=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ metadata_content=dict(),
+ entity_id=dict(),
+ ),
+ ),
+ master_user_name=dict(),
+ master_backend_role=dict(),
+ subject_key=dict(no_log=False),
+ roles_key=dict(no_log=False),
+ session_timeout_minutes=dict(type="int"),
+ ),
+ ),
+ ),
+ ),
+ auto_tune_options=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ desired_state=dict(choices=["ENABLED", "DISABLED"]),
+ maintenance_schedules=dict(
+ type="list",
+ elements="dict",
+ default=None,
+ options=dict(
+ start_at=dict(),
+ duration=dict(
+ type="dict",
+ default=None,
+ options=dict(
+ value=dict(type="int"),
+ unit=dict(choices=["HOURS"]),
+ ),
+ ),
+ cron_expression_for_recurrence=dict(),
+ ),
+ ),
+ ),
+ ),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.require_botocore_at_least("1.21.38")
+
+ try:
+ client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service")
+
+ if module.params["state"] == "absent":
+ ret_dict = ensure_domain_absent(client, module)
+ else:
+ ret_dict = ensure_domain_present(client, module)
+
+ module.exit_json(**ret_dict)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/opensearch_info.py b/ansible_collections/community/aws/plugins/modules/opensearch_info.py
new file mode 100644
index 000000000..700ad26fd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/opensearch_info.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: opensearch_info
+short_description: Obtain information about one or more OpenSearch or Elasticsearch domains
+description:
+ - Obtain information about one or more Amazon OpenSearch Service domains.
+version_added: 4.0.0
+author: "Sebastien Rosset (@sebastien-rosset)"
+options:
+ domain_name:
+ description:
+ - The name of the Amazon OpenSearch/ElasticSearch Service domain.
+ required: false
+ type: str
+ tags:
+ description:
+ - >
+ A dict of tags that are used to filter OpenSearch domains that match
+ all tag key, value pairs.
+ required: false
+ type: dict
+requirements:
+ - botocore >= 1.21.38
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = '''
+- name: Get information about an OpenSearch domain instance
+ community.aws.opensearch_info:
+ domain_name: my-search-cluster
+ register: new_cluster_info
+
+- name: Get all OpenSearch instances
+ community.aws.opensearch_info:
+
+- name: Get all OpenSearch instances that have the specified Key, Value tags
+ community.aws.opensearch_info:
+ tags:
+ Applications: search
+ Environment: Development
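+
+# Illustrative follow-up, assuming the first task above ran and registered
+# new_cluster_info; the module returns the matching domains under the
+# "domains" key.
+- name: Display the endpoint of the registered domain
+ ansible.builtin.debug:
+ msg: "{{ new_cluster_info.domains[0].domain_status.endpoint }}"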
+'''
+
+RETURN = '''
+domains:
+ description: List of OpenSearch domain instances.
+ returned: always
+ type: complex
+ contains:
+ domain_status:
+ description: The current status of the OpenSearch domain.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The ARN of the OpenSearch domain.
+ returned: always
+ type: str
+ domain_id:
+ description: The unique identifier for the OpenSearch domain.
+ returned: always
+ type: str
+ domain_name:
+ description: The name of the OpenSearch domain.
+ returned: always
+ type: str
+ created:
+ description:
+ - >
+ The domain creation status. True if the creation of a domain is complete.
+ False if domain creation is still in progress.
+ returned: always
+ type: bool
+ deleted:
+ description:
+ - >
+ The domain deletion status.
+ True if a delete request has been received for the domain but resource cleanup is still in progress.
+ False if the domain has not been deleted.
+ Once domain deletion is complete, the status of the domain is no longer returned.
+ returned: always
+ type: bool
+ endpoint:
+ description: The domain endpoint that you use to submit index and search requests.
+ returned: always
+ type: str
+ endpoints:
+ description:
+ - >
+ Map containing the domain endpoints used to submit index and search requests.
+ - >
+ When you create a domain attached to a VPC, this property contains
+ the DNS endpoint to which service requests are submitted.
+ - >
+ If you query opensearch_info immediately after creating the OpenSearch cluster,
+ the VPC endpoint may not be returned yet. It may take several minutes until the
+ endpoint is available.
+ type: dict
+ processing:
+ description:
+ - >
+ The status of the domain configuration.
+ True if Amazon OpenSearch Service is processing configuration changes.
+ False if the configuration is active.
+ returned: always
+ type: bool
+ upgrade_processing:
+ description: True if a domain upgrade operation is in progress.
+ returned: always
+ type: bool
+ engine_version:
+ description: The version of the OpenSearch domain.
+ returned: always
+ type: str
+ sample: OpenSearch_1.1
+ cluster_config:
+ description:
+ - Parameters for the cluster configuration of an OpenSearch Service domain.
+ type: complex
+ contains:
+ instance_type:
+ description:
+ - Type of the instances to use for the domain.
+ type: str
+ instance_count:
+ description:
+ - Number of instances for the domain.
+ type: int
+ zone_awareness:
+ description:
+ - A boolean value to indicate whether zone awareness is enabled.
+ type: bool
+ availability_zone_count:
+ description:
+ - >
+ An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
+ This should be equal to the number of subnets if VPC endpoints are enabled.
+ type: int
+ dedicated_master_enabled:
+ description:
+ - A boolean value to indicate whether a dedicated master node is enabled.
+ type: bool
+ zone_awareness_enabled:
+ description:
+ - A boolean value to indicate whether zone awareness is enabled.
+ type: bool
+ zone_awareness_config:
+ description:
+ - The zone awareness configuration for a domain when zone awareness is enabled.
+ type: complex
+ contains:
+ availability_zone_count:
+ description:
+ - An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
+ type: int
+ dedicated_master_type:
+ description:
+ - The instance type for a dedicated master node.
+ type: str
+ dedicated_master_count:
+ description:
+ - Total number of dedicated master nodes, active and on standby, for the domain.
+ type: int
+ warm_enabled:
+ description:
+ - True to enable UltraWarm storage.
+ type: bool
+ warm_type:
+ description:
+ - The instance type for the OpenSearch domain's warm nodes.
+ type: str
+ warm_count:
+ description:
+ - The number of UltraWarm nodes in the domain.
+ type: int
+ cold_storage_options:
+ description:
+ - Specifies the ColdStorageOptions config for a Domain.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - True to enable cold storage. Supported on Elasticsearch 7.9 or above.
+ type: bool
+ ebs_options:
+ description:
+ - Parameters to configure EBS-based storage for an OpenSearch Service domain.
+ type: complex
+ contains:
+ ebs_enabled:
+ description:
+ - Specifies whether EBS-based storage is enabled.
+ type: bool
+ volume_type:
+ description:
+ - Specifies the volume type for EBS-based storage. One of "standard", "gp2" or "io1".
+ type: str
+ volume_size:
+ description:
+ - Integer to specify the size of an EBS volume.
+ type: int
+ iops:
+ description:
+ - The IOPS for a Provisioned IOPS EBS volume (SSD).
+ type: int
+ vpc_options:
+ description:
+ - Options to specify the subnets and security groups for a VPC endpoint.
+ type: complex
+ contains:
+ vpc_id:
+ description: The VPC ID for the domain.
+ type: str
+ subnet_ids:
+ description:
+ - Specifies the subnet ids for VPC endpoint.
+ type: list
+ elements: str
+ security_group_ids:
+ description:
+ - Specifies the security group ids for VPC endpoint.
+ type: list
+ elements: str
+ availability_zones:
+ description:
+ - The Availability Zones for the domain.
+ type: list
+ elements: str
+ snapshot_options:
+ description:
+ - Option to set time, in UTC format, of the daily automated snapshot.
+ type: complex
+ contains:
+ automated_snapshot_start_hour:
+ description:
+ - >
+ Integer value from 0 to 23 specifying when the service takes a daily automated snapshot
+ of the specified Elasticsearch domain.
+ type: int
+ access_policies:
+ description:
+ - IAM access policy as a JSON-formatted string.
+ type: complex
+ encryption_at_rest_options:
+ description:
+ - Parameters to enable encryption at rest.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - Should data be encrypted while at rest.
+ type: bool
+ kms_key_id:
+ description:
+ - If encryption at rest is enabled, this identifies the encryption key to use.
+ - The value should be a KMS key ARN. It can also be the KMS key id.
+ type: str
+ node_to_node_encryption_options:
+ description:
+ - Node-to-node encryption options.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - True to enable node-to-node encryption.
+ type: bool
+ cognito_options:
+ description:
+ - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - The option to enable Cognito for OpenSearch Dashboards authentication.
+ type: bool
+ user_pool_id:
+ description:
+ - The Cognito user pool ID for OpenSearch Dashboards authentication.
+ type: str
+ identity_pool_id:
+ description:
+ - The Cognito identity pool ID for OpenSearch Dashboards authentication.
+ type: str
+ role_arn:
+ description:
+ - The role ARN that provides OpenSearch permissions for accessing Cognito resources.
+ type: str
+ domain_endpoint_options:
+ description:
+ - Options to specify configuration that will be applied to the domain endpoint.
+ type: complex
+ contains:
+ enforce_https:
+ description:
+ - Whether only HTTPS endpoint should be enabled for the domain.
+ type: bool
+ tls_security_policy:
+ description:
+ - Specify the TLS security policy to apply to the HTTPS endpoint of the domain.
+ type: str
+ custom_endpoint_enabled:
+ description:
+ - Whether to enable a custom endpoint for the domain.
+ type: bool
+ custom_endpoint:
+ description:
+ - The fully qualified domain for your custom endpoint.
+ type: str
+ custom_endpoint_certificate_arn:
+ description:
+ - The ACM certificate ARN for your custom endpoint.
+ type: str
+ advanced_security_options:
+ description:
+ - Specifies advanced security options.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - True if advanced security is enabled.
+ - You must enable node-to-node encryption to use advanced security options.
+ type: bool
+ internal_user_database_enabled:
+ description:
+ - True if the internal user database is enabled.
+ type: bool
+ master_user_options:
+ description:
+ - Credentials for the master user, username and password, ARN, or both.
+ type: complex
+ contains:
+ master_user_arn:
+ description:
+ - ARN for the master user (if IAM is enabled).
+ type: str
+ master_user_name:
+ description:
+ - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ master_user_password:
+ description:
+ - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ saml_options:
+ description:
+ - The SAML application configuration for the domain.
+ type: complex
+ contains:
+ enabled:
+ description:
+ - True if SAML is enabled.
+ type: bool
+ idp:
+ description:
+ - The SAML Identity Provider's information.
+ type: complex
+ contains:
+ metadata_content:
+ description:
+ - The metadata of the SAML application in XML format.
+ type: str
+ entity_id:
+ description:
+ - The unique entity ID of the application in SAML identity provider.
+ type: str
+ master_user_name:
+ description:
+ - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database.
+ type: str
+ master_backend_role:
+ description:
+ - The backend role that the SAML master user is mapped to.
+ type: str
+ subject_key:
+ description:
+ - Element of the SAML assertion to use for username. Default is NameID.
+ type: str
+ roles_key:
+ description:
+ - Element of the SAML assertion to use for backend roles. Default is roles.
+ type: str
+ session_timeout_minutes:
+ description:
+ - >
+ The duration, in minutes, after which a user session becomes inactive.
+ Acceptable values are between 1 and 1440, and the default value is 60.
+ type: int
+ auto_tune_options:
+ description:
+ - Specifies Auto-Tune options.
+ type: complex
+ contains:
+ desired_state:
+ description:
+ - The Auto-Tune desired state. Valid values are ENABLED and DISABLED.
+ type: str
+ maintenance_schedules:
+ description:
+ - A list of maintenance schedules.
+ type: list
+ elements: dict
+ contains:
+ start_at:
+ description:
+ - The timestamp at which the Auto-Tune maintenance schedule starts.
+ type: str
+ duration:
+ description:
+ - Specifies maintenance schedule duration, duration value and duration unit.
+ type: complex
+ contains:
+ value:
+ description:
+ - Integer to specify the value of a maintenance schedule duration.
+ type: int
+ unit:
+ description:
+ - The unit of a maintenance schedule duration. Valid value is HOURS.
+ type: str
+ cron_expression_for_recurrence:
+ description:
+ - A cron expression for a recurring maintenance schedule.
+ type: str
+ domain_config:
+ description: The OpenSearch domain configuration
+ returned: always
+ type: complex
+ contains:
+ domain_name:
+ description: The name of the OpenSearch domain.
+ returned: always
+ type: str
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+ AWSRetry,
+ boto3_tag_list_to_ansible_dict,
+ camel_dict_to_snake_dict,
+)
+from ansible_collections.community.aws.plugins.module_utils.opensearch import (
+ get_domain_config,
+ get_domain_status,
+)
+
+
+def domain_info(client, module):
+ domain_name = module.params.get('domain_name')
+ filter_tags = module.params.get('tags')
+
+ domain_list = []
+ if domain_name:
+ domain_status = get_domain_status(client, module, domain_name)
+ if domain_status:
+ domain_list.append({'DomainStatus': domain_status})
+ else:
+ domain_summary_list = client.list_domain_names()['DomainNames']
+ for d in domain_summary_list:
+ domain_status = get_domain_status(client, module, d['DomainName'])
+ if domain_status:
+ domain_list.append({'DomainStatus': domain_status})
+
+ # Get the domain tags
+ for domain in domain_list:
+ current_domain_tags = None
+ domain_arn = domain['DomainStatus']['ARN']
+ try:
+ current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]
+ domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # This could potentially happen if a domain is deleted between the time
+ # its domain status was queried and the tags were queried.
+ domain['Tags'] = {}
+
+ # Filter by tags
+ if filter_tags:
+ for tag_key in filter_tags:
+ try:
+ domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])]
+ except (TypeError, AttributeError) as e:
+ module.fail_json(msg="OpenSearch tag filtering error", exception=e)
+
+ # Get the domain config
+ for idx, domain in enumerate(domain_list):
+ domain_name = domain['DomainStatus']['DomainName']
+ (domain_config, arn) = get_domain_config(client, module, domain_name)
+ if domain_config:
+ domain['DomainConfig'] = domain_config
+ domain_list[idx] = camel_dict_to_snake_dict(domain,
+ ignore_list=['AdvancedOptions', 'Endpoints', 'Tags'])
+
+ return dict(changed=False, domains=domain_list)
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ domain_name=dict(required=False),
+ tags=dict(type='dict', required=False),
+ ),
+ supports_check_mode=True,
+ )
+ module.require_botocore_at_least("1.21.38")
+
+ try:
+ client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service")
+
+ module.exit_json(**domain_info(client, module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift.py b/ansible_collections/community/aws/plugins/modules/redshift.py
new file mode 100644
index 000000000..27e959893
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/redshift.py
@@ -0,0 +1,673 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+ - "Rafael Driutti (@rafaeldriutti)"
+module: redshift
+version_added: 1.0.0
+short_description: Create, delete, or modify an Amazon Redshift instance
+description:
+ - Creates, deletes, or modifies Amazon Redshift cluster instances.
+options:
+ command:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'create', 'facts', 'delete', 'modify' ]
+ type: str
+ identifier:
+ description:
+ - Redshift cluster identifier.
+ required: true
+ type: str
+ node_type:
+ description:
+ - The node type of the cluster.
+      - Required when I(command=create).
+ choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large','dc2.large',
+ 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
+ type: str
+ username:
+ description:
+ - Master database username.
+ - Used only when I(command=create).
+ type: str
+ password:
+ description:
+ - Master database password.
+ - Used only when I(command=create).
+ type: str
+ cluster_type:
+ description:
+ - The type of cluster.
+ choices: ['multi-node', 'single-node' ]
+ default: 'single-node'
+ type: str
+ db_name:
+ description:
+ - Name of the database.
+ type: str
+ availability_zone:
+ description:
+ - Availability zone in which to launch cluster.
+ aliases: ['zone', 'aws_zone']
+ type: str
+ number_of_nodes:
+ description:
+ - Number of nodes.
+ - Only used when I(cluster_type=multi-node).
+ type: int
+ cluster_subnet_group_name:
+ description:
+      - The cluster subnet group in which to place the cluster.
+ aliases: ['subnet']
+ type: str
+ cluster_security_groups:
+ description:
+      - The security groups the cluster belongs to.
+ type: list
+ elements: str
+ aliases: ['security_groups']
+ vpc_security_group_ids:
+ description:
+      - The VPC security group IDs to associate with the cluster.
+ aliases: ['vpc_security_groups']
+ type: list
+ elements: str
+ skip_final_cluster_snapshot:
+ description:
+ - Skip a final snapshot before deleting the cluster.
+ - Used only when I(command=delete).
+ aliases: ['skip_final_snapshot']
+ default: false
+ type: bool
+ final_cluster_snapshot_identifier:
+ description:
+ - Identifier of the final snapshot to be created before deleting the cluster.
+ - If this parameter is provided, I(skip_final_cluster_snapshot) must be C(false).
+ - Used only when I(command=delete).
+ aliases: ['final_snapshot_id']
+ type: str
+ preferred_maintenance_window:
+ description:
+ - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
+ - Times are specified in UTC.
+ - If not specified then a random 30 minute maintenance window is assigned.
+ aliases: ['maintance_window', 'maint_window']
+ type: str
+ cluster_parameter_group_name:
+ description:
+ - Name of the cluster parameter group.
+ aliases: ['param_group_name']
+ type: str
+ automated_snapshot_retention_period:
+ description:
+ - The number of days that automated snapshots are retained.
+ aliases: ['retention_period']
+ type: int
+ port:
+ description:
+ - Which port the cluster is listening on.
+ type: int
+ cluster_version:
+ description:
+ - Which version the cluster should have.
+ aliases: ['version']
+ choices: ['1.0']
+ type: str
+ allow_version_upgrade:
+ description:
+ - When I(allow_version_upgrade=true) the cluster may be automatically
+ upgraded during the maintenance window.
+ aliases: ['version_upgrade']
+ default: true
+ type: bool
+ publicly_accessible:
+ description:
+      - Whether the cluster is publicly accessible.
+ default: false
+ type: bool
+ encrypted:
+ description:
+      - Whether the cluster is encrypted.
+ default: false
+ type: bool
+ elastic_ip:
+ description:
+ - An Elastic IP to use for the cluster.
+ type: str
+ new_cluster_identifier:
+ description:
+      - The new identifier to assign to the cluster.
+      - Only used when I(command=modify).
+ aliases: ['new_identifier']
+ type: str
+ wait:
+ description:
+      - When I(command=create) or I(command=modify), wait for the cluster to enter the C(available) state.
+      - When I(command=delete), wait for the cluster to be terminated.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+      - When I(wait=true), defines how long (in seconds) to wait before giving up.
+ default: 300
+ type: int
+ enhanced_vpc_routing:
+ description:
+ - Whether the cluster should have enhanced VPC routing enabled.
+ default: false
+ type: bool
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 1.3.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+- name: Basic cluster provisioning example
+ community.aws.redshift:
+ command: create
+ node_type: ds1.xlarge
+ identifier: new_cluster
+ username: cluster_admin
+ password: 1nsecure
+
+- name: Cluster delete example
+ community.aws.redshift:
+ command: delete
+ identifier: new_cluster
+ skip_final_cluster_snapshot: true
+ wait: true
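+
+# A hedged sketch (identifier and new name are illustrative): rename an
+# existing cluster and wait for it to become available again.
+- name: Cluster modify example
+  community.aws.redshift:
+    command: modify
+    identifier: new_cluster
+    new_cluster_identifier: renamed-cluster
+    wait: true
+    wait_timeout: 600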
+'''
+
+RETURN = r'''
+cluster:
+  description: Dictionary containing the cluster information.
+ returned: success
+ type: complex
+ contains:
+ identifier:
+ description: Id of the cluster.
+ returned: success
+ type: str
+ sample: "new_redshift_cluster"
+ create_time:
+ description: Time of the cluster creation as timestamp.
+ returned: success
+ type: float
+ sample: 1430158536.308
+ status:
+ description: Status of the cluster.
+ returned: success
+ type: str
+ sample: "available"
+ db_name:
+ description: Name of the database.
+ returned: success
+ type: str
+ sample: "new_db_name"
+ availability_zone:
+ description: Amazon availability zone where the cluster is located. "None" until cluster is available.
+ returned: success
+ type: str
+ sample: "us-east-1b"
+ maintenance_window:
+      description: Time frame during which maintenance and upgrades are performed.
+ returned: success
+ type: str
+ sample: "sun:09:30-sun:10:00"
+ private_ip_address:
+ description: Private IP address of the main node.
+ returned: success
+ type: str
+ sample: "10.10.10.10"
+ public_ip_address:
+ description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled.
+ returned: success
+ type: str
+ sample: "0.0.0.0"
+ port:
+ description: Port of the cluster. "None" until cluster is available.
+ returned: success
+ type: int
+ sample: 5439
+ url:
+ description: FQDN of the main cluster node. "None" until cluster is available.
+ returned: success
+ type: str
+ sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
+ enhanced_vpc_routing:
+      description: Status of the enhanced VPC routing feature.
+ returned: success
+ type: bool
+ tags:
+      description: AWS tags for the cluster.
+ returned: success
+ type: dict
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id
+
+
+def _ensure_tags(redshift, identifier, existing_tags, module):
+ """Compares and update resource tags"""
+
+ account_id = get_aws_account_id(module)
+ region = module.params.get('region')
+ resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier)
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
+ tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags)
+
+ if tags_to_add:
+ try:
+ redshift.create_tags(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to add tags to cluster")
+
+ if tags_to_remove:
+ try:
+ redshift.delete_tags(ResourceName=resource_arn, TagKeys=tags_to_remove)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete tags on cluster")
+
+ changed = bool(tags_to_add or tags_to_remove)
+ return changed
+
+
+def _collect_facts(resource):
+ """Transform cluster information to dict."""
+ facts = {
+ 'identifier': resource['ClusterIdentifier'],
+ 'status': resource['ClusterStatus'],
+ 'username': resource['MasterUsername'],
+ 'db_name': resource['DBName'],
+ 'maintenance_window': resource['PreferredMaintenanceWindow'],
+        'enhanced_vpc_routing': resource['EnhancedVpcRouting'],
+    }
+
+ for node in resource['ClusterNodes']:
+ if node['NodeRole'] in ('SHARED', 'LEADER'):
+ facts['private_ip_address'] = node['PrivateIPAddress']
+ if facts['enhanced_vpc_routing'] is False:
+ facts['public_ip_address'] = node['PublicIPAddress']
+ else:
+ facts['public_ip_address'] = None
+ break
+
+ # Some parameters are not ready instantly if you don't wait for available
+ # cluster status
+ facts['create_time'] = None
+ facts['url'] = None
+ facts['port'] = None
+ facts['availability_zone'] = None
+ facts['tags'] = {}
+
+ if resource['ClusterStatus'] != "creating":
+ facts['create_time'] = resource['ClusterCreateTime']
+ facts['url'] = resource['Endpoint']['Address']
+ facts['port'] = resource['Endpoint']['Port']
+ facts['availability_zone'] = resource['AvailabilityZone']
+ facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags'])
+
+ return facts
+
+
+@AWSRetry.jittered_backoff()
+def _describe_cluster(redshift, identifier):
+ '''
+ Basic wrapper around describe_clusters with a retry applied
+ '''
+ return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+
+
+@AWSRetry.jittered_backoff()
+def _create_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around create_cluster with a retry applied
+ '''
+ return redshift.create_cluster(**kwargs)
+
+
+# Simple wrapper around delete, try to avoid throwing an error if some other
+# operation is in progress
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+def _delete_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around delete_cluster with a retry applied.
+ Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that
+ we can still delete a cluster if some kind of change operation was in
+ progress.
+ '''
+ return redshift.delete_cluster(**kwargs)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+def _modify_cluster(redshift, **kwargs):
+ '''
+ Basic wrapper around modify_cluster with a retry applied.
+ Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases
+ where another modification is still in progress
+ '''
+ return redshift.modify_cluster(**kwargs)
+
+
+def create_cluster(module, redshift):
+ """
+ Create a new cluster
+
+ module: AnsibleAWSModule object
+ redshift: authenticated redshift connection object
+
+    Returns: a tuple of (changed, cluster facts dict)
+ """
+
+ identifier = module.params.get('identifier')
+ node_type = module.params.get('node_type')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ d_b_name = module.params.get('db_name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ tags = module.params.get('tags')
+
+ changed = True
+ # Package up the optional parameters
+ params = {}
+ for p in ('cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port',
+ 'cluster_version', 'allow_version_upgrade',
+ 'number_of_nodes', 'publicly_accessible', 'encrypted',
+ 'elastic_ip', 'enhanced_vpc_routing'):
+ # https://github.com/boto/boto3/issues/400
+ if module.params.get(p) is not None:
+ params[p] = module.params.get(p)
+
+    if d_b_name:
+        # The odd 'd_b_name' key is intentional: snake_dict_to_camel_dict()
+        # converts it to the 'DBName' parameter expected by the API.
+        params['d_b_name'] = d_b_name
+ if tags:
+ tags = ansible_dict_to_boto3_tag_list(tags)
+ params['tags'] = tags
+
+ try:
+ _describe_cluster(redshift, identifier)
+ changed = False
+ except is_boto3_error_code('ClusterNotFound'):
+ try:
+ _create_cluster(redshift,
+ ClusterIdentifier=identifier,
+ NodeType=node_type,
+ MasterUsername=username,
+ MasterUserPassword=password,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create cluster")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe cluster")
+ if wait:
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_available')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe cluster")
+
+ if tags:
+ if _ensure_tags(redshift, identifier, resource['Tags'], module):
+ changed = True
+ resource = _describe_cluster(redshift, identifier)
+
+ return changed, _collect_facts(resource)
+
+
+def describe_cluster(module, redshift):
+ """
+ Collect data about the cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+ identifier = module.params.get('identifier')
+
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error describing cluster")
+
+ return True, _collect_facts(resource)
+
+
+def delete_cluster(module, redshift):
+ """
+ Delete a cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ params = {}
+ for p in ('skip_final_cluster_snapshot',
+ 'final_cluster_snapshot_identifier'):
+        # https://github.com/boto/boto3/issues/400
+        if module.params.get(p) is not None:
+            params[p] = module.params.get(p)
+
+ try:
+ _delete_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except is_boto3_error_code('ClusterNotFound'):
+ return False, {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete cluster")
+
+ if wait:
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_deleted')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout deleting the cluster")
+
+ return True, {}
+
+
+def modify_cluster(module, redshift):
+ """
+ Modify an existing cluster.
+
+ module: Ansible module object
+ redshift: authenticated redshift connection object
+ """
+
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Package up the optional parameters
+ params = {}
+ for p in ('cluster_type', 'cluster_security_groups',
+ 'vpc_security_group_ids', 'cluster_subnet_group_name',
+ 'availability_zone', 'preferred_maintenance_window',
+ 'cluster_parameter_group_name',
+ 'automated_snapshot_retention_period', 'port', 'cluster_version',
+ 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
+ # https://github.com/boto/boto3/issues/400
+ if module.params.get(p) is not None:
+ params[p] = module.params.get(p)
+
+ # enhanced_vpc_routing parameter change needs an exclusive request
+ if module.params.get('enhanced_vpc_routing') is not None:
+ try:
+ _modify_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+ if wait:
+ attempts = wait_timeout // 60
+ waiter = redshift.get_waiter('cluster_available')
+ try:
+ waiter.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e,
+ msg="Timeout waiting for cluster enhanced vpc routing modification")
+
+ # change the rest
+ try:
+ _modify_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ **snake_dict_to_camel_dict(params, capitalize_first=True))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+
+ if module.params.get('new_cluster_identifier'):
+ identifier = module.params.get('new_cluster_identifier')
+
+ if wait:
+ attempts = wait_timeout // 60
+ waiter2 = redshift.get_waiter('cluster_available')
+ try:
+ waiter2.wait(
+ ClusterIdentifier=identifier,
+ WaiterConfig=dict(MaxAttempts=attempts)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
+ try:
+ resource = _describe_cluster(redshift, identifier)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+
+    # Only reconcile tags when the user actually passed some;
+    # compare_aws_tags() cannot handle tags=None.
+    if module.params.get('tags') is not None:
+        if _ensure_tags(redshift, identifier, resource['Tags'], module):
+            resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+
+ return True, _collect_facts(resource)
+
+
+def main():
+ argument_spec = dict(
+ command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+ identifier=dict(required=True),
+ node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
+ 'ds2.8xlarge', 'dc1.large', 'dc2.large',
+ 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
+ 'dw2.large', 'dw2.8xlarge'], required=False),
+ username=dict(required=False),
+ password=dict(no_log=True, required=False),
+ db_name=dict(required=False),
+ cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
+ cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'),
+ vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'),
+ skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
+ type='bool', default=False),
+ final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
+ cluster_subnet_group_name=dict(aliases=['subnet']),
+ availability_zone=dict(aliases=['aws_zone', 'zone']),
+ preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
+ cluster_parameter_group_name=dict(aliases=['param_group_name']),
+ automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
+ port=dict(type='int'),
+ cluster_version=dict(aliases=['version'], choices=['1.0']),
+ allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
+ number_of_nodes=dict(type='int'),
+ publicly_accessible=dict(type='bool', default=False),
+ encrypted=dict(type='bool', default=False),
+ elastic_ip=dict(required=False),
+ new_cluster_identifier=dict(aliases=['new_identifier']),
+ enhanced_vpc_routing=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True)
+ )
+
+ required_if = [
+ ('command', 'delete', ['skip_final_cluster_snapshot']),
+ ('command', 'create', ['node_type',
+ 'username',
+ 'password'])
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if
+ )
+
+ command = module.params.get('command')
+ skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
+ final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
+ # can't use module basic required_if check for this case
+ if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
+ module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
+
+ conn = module.client('redshift')
+
+ changed = True
+ if command == 'create':
+ (changed, cluster) = create_cluster(module, conn)
+
+ elif command == 'facts':
+ (changed, cluster) = describe_cluster(module, conn)
+
+ elif command == 'delete':
+ (changed, cluster) = delete_cluster(module, conn)
+
+ elif command == 'modify':
+ (changed, cluster) = modify_cluster(module, conn)
+
+ module.exit_json(changed=changed, cluster=cluster)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
new file mode 100644
index 000000000..1c42ea802
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, JR Kerkstra <jrkerkstra@example.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redshift_cross_region_snapshots
+version_added: 1.0.0
+short_description: Manage Redshift Cross Region Snapshots
+description:
+ - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots.
+ - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy)
+author: JR Kerkstra (@captainkerk)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to configure cross-region snapshots for.
+ required: true
+ aliases: [ "cluster" ]
+ type: str
+ state:
+ description:
+ - Create or remove the cross-region snapshot configuration.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ region:
+ description:
+ - "The cluster's region."
+ required: true
+ aliases: [ "source" ]
+ type: str
+ destination_region:
+ description:
+ - The region to copy snapshots to.
+ required: true
+ aliases: [ "destination" ]
+ type: str
+ snapshot_copy_grant:
+ description:
+ - A grant for Amazon Redshift to use a master key in the I(destination_region).
+ - See U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.create_snapshot_copy_grant)
+ aliases: [ "copy_grant" ]
+ type: str
+ snapshot_retention_period:
+ description:
+ - The number of days to keep cross-region snapshots for.
+ required: true
+ aliases: [ "retention_period" ]
+ type: int
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: configure cross-region snapshot on cluster `johniscool`
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: johniscool
+ state: present
+ region: us-east-1
+ destination_region: us-west-2
+ retention_period: 1
+
+- name: configure cross-region snapshot on kms-encrypted cluster
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: whatever
+ state: present
+ region: us-east-1
+ destination: us-west-2
+ copy_grant: 'my-grant-in-destination'
+ retention_period: 10
+
+- name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize)
+ community.aws.redshift_cross_region_snapshots:
+ cluster_name: whatever
+ state: absent
+ region: us-east-1
+ destination_region: us-west-2
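+
+# A minimal sketch (cluster name is illustrative): the retention period is the
+# only setting this module can modify in place; changing the destination or
+# grant requires disabling cross-region snapshots first.
+- name: change only the snapshot retention period
+  community.aws.redshift_cross_region_snapshots:
+    cluster_name: whatever
+    state: present
+    region: us-east-1
+    destination_region: us-west-2
+    retention_period: 30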
+'''
+
+RETURN = ''' # '''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class SnapshotController(object):
+
+ def __init__(self, client, cluster_name):
+ self.client = client
+ self.cluster_name = cluster_name
+
+ def get_cluster_snapshot_copy_status(self):
+ response = self.client.describe_clusters(
+ ClusterIdentifier=self.cluster_name
+ )
+ return response['Clusters'][0].get('ClusterSnapshotCopyStatus')
+
+ def enable_snapshot_copy(self, destination_region, grant_name, retention_period):
+ if grant_name:
+ self.client.enable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name,
+ DestinationRegion=destination_region,
+ RetentionPeriod=retention_period,
+ SnapshotCopyGrantName=grant_name,
+ )
+ else:
+ self.client.enable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name,
+ DestinationRegion=destination_region,
+ RetentionPeriod=retention_period,
+ )
+
+ def disable_snapshot_copy(self):
+ self.client.disable_snapshot_copy(
+ ClusterIdentifier=self.cluster_name
+ )
+
+ def modify_snapshot_copy_retention_period(self, retention_period):
+ self.client.modify_snapshot_copy_retention_period(
+ ClusterIdentifier=self.cluster_name,
+ RetentionPeriod=retention_period
+ )
+
+
+def requesting_unsupported_modifications(actual, requested):
+    # SnapshotCopyGrantName is only present in the status when a KMS grant is
+    # in use, so use .get() to avoid a KeyError on unencrypted clusters.
+    if (actual.get('SnapshotCopyGrantName') != requested['snapshot_copy_grant'] or
+            actual['DestinationRegion'] != requested['destination_region']):
+ return True
+ return False
+
+
+def needs_update(actual, requested):
+ if actual['RetentionPeriod'] != requested['snapshot_retention_period']:
+ return True
+ return False
+
+
+def run_module():
+ argument_spec = dict(
+ cluster_name=dict(type='str', required=True, aliases=['cluster']),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ region=dict(type='str', required=True, aliases=['source']),
+ destination_region=dict(type='str', required=True, aliases=['destination']),
+ snapshot_copy_grant=dict(type='str', aliases=['copy_grant']),
+ snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+ connection = module.client('redshift')
+
+ snapshot_controller = SnapshotController(client=connection,
+ cluster_name=module.params.get('cluster_name'))
+
+ current_config = snapshot_controller.get_cluster_snapshot_copy_status()
+ if current_config is not None:
+ if module.params.get('state') == 'present':
+ if requesting_unsupported_modifications(current_config, module.params):
+ message = 'Cannot modify destination_region or grant_name. ' \
+ 'Please disable cross-region snapshots, and re-run.'
+ module.fail_json(msg=message, **result)
+ if needs_update(current_config, module.params):
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.modify_snapshot_copy_retention_period(
+ module.params.get('snapshot_retention_period')
+ )
+ else:
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.disable_snapshot_copy()
+ else:
+ if module.params.get('state') == 'present':
+ result['changed'] = True
+ if not module.check_mode:
+ snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
+ module.params.get('snapshot_copy_grant'),
+ module.params.get('snapshot_retention_period'))
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_info.py b/ansible_collections/community/aws/plugins/modules/redshift_info.py
new file mode 100644
index 000000000..ff4da774e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/redshift_info.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redshift_info
+version_added: 1.0.0
+author: "Jens Carl (@j-carl)"
+short_description: Gather information about Redshift cluster(s)
+description:
+ - Gather information about Redshift cluster(s).
+options:
+ cluster_identifier:
+ description:
+      - The prefix of the cluster identifier of the Redshift cluster you are searching for.
+ - "This is a regular expression match with implicit '^'. Append '$' for a complete match."
+ required: false
+ aliases: ['name', 'identifier']
+ type: str
+ tags:
+ description:
+ - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
+        to match against the cluster(s) you are searching for."
+ required: false
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS guide for details.
+
+- name: Find all clusters
+ community.aws.redshift_info:
+ register: redshift
+
+- name: Find cluster(s) with matching tags
+ community.aws.redshift_info:
+ tags:
+ env: prd
+ stack: monitoring
+ register: redshift_tags
+
+- name: Find cluster(s) with matching name/prefix and tags
+ community.aws.redshift_info:
+ tags:
+ env: dev
+ stack: web
+ name: user-
+ register: redshift_web
+
+- name: Fail if no cluster(s) is/are found
+ community.aws.redshift_info:
+ tags:
+ env: stg
+ stack: db
+ register: redshift_user
+ failed_when: "{{ redshift_user.results | length == 0 }}"
+'''
+
+RETURN = '''
+# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
+---
+cluster_identifier:
+ description: Unique key to identify the cluster.
+ returned: success
+ type: str
+ sample: "redshift-identifier"
+node_type:
+ description: The node type for nodes in the cluster.
+ returned: success
+ type: str
+ sample: "ds2.xlarge"
+cluster_status:
+ description: Current state of the cluster.
+ returned: success
+ type: str
+ sample: "available"
+modify_status:
+ description: The status of a modify operation.
+ returned: optional
+ type: str
+ sample: ""
+master_username:
+ description: The master user name for the cluster.
+ returned: success
+ type: str
+ sample: "admin"
+db_name:
+ description: The name of the initial database that was created when the cluster was created.
+ returned: success
+ type: str
+ sample: "dev"
+endpoint:
+ description: The connection endpoint.
+ returned: success
+  type: dict
+ sample: {
+ "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
+ "port": 5439
+ }
+cluster_create_time:
+ description: The date and time that the cluster was created.
+ returned: success
+ type: str
+ sample: "2016-05-10T08:33:16.629000+00:00"
+automated_snapshot_retention_period:
+ description: The number of days that automatic cluster snapshots are retained.
+ returned: success
+ type: int
+ sample: 1
+cluster_security_groups:
+ description: A list of cluster security groups that are associated with the cluster.
+ returned: success
+ type: list
+ sample: []
+vpc_security_groups:
+  description: A list of VPC security groups that are associated with the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "status": "active",
+ "vpc_security_group_id": "sg-12cghhg"
+ }
+ ]
+cluster_parameter_groups:
+ description: The list of cluster parameters that are associated with this cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "cluster_parameter_status_list": [
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "statement_timeout"
+ },
+ {
+ "parameter_apply_status": "in-sync",
+ "parameter_name": "require_ssl"
+ }
+ ],
+ "parameter_apply_status": "in-sync",
+ "parameter_group_name": "tuba"
+ }
+ ]
+cluster_subnet_group_name:
+ description: The name of the subnet group that is associated with the cluster.
+ returned: success
+ type: str
+ sample: "redshift-subnet"
+vpc_id:
+ description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
+ returned: success
+ type: str
+ sample: "vpc-1234567"
+availability_zone:
+ description: The name of the Availability Zone in which the cluster is located.
+ returned: success
+ type: str
+ sample: "us-east-1b"
+preferred_maintenance_window:
+ description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
+ returned: success
+ type: str
+ sample: "tue:07:30-tue:08:00"
+pending_modified_values:
+ description: A value that, if present, indicates that changes to the cluster are pending.
+ returned: success
+ type: dict
+ sample: {}
+cluster_version:
+ description: The version ID of the Amazon Redshift engine that is running on the cluster.
+ returned: success
+ type: str
+ sample: "1.0"
+allow_version_upgrade:
+ description: >
+ A Boolean value that, if true, indicates that major version upgrades will be applied
+ automatically to the cluster during the maintenance window.
+ returned: success
+ type: bool
+ sample: true|false
+number_of_nodes:
+ description: The number of compute nodes in the cluster.
+ returned: success
+ type: int
+ sample: 12
+publicly_accessible:
+  description: A Boolean value that, if true, indicates that the cluster can be accessed from a public network.
+ returned: success
+ type: bool
+ sample: true|false
+encrypted:
+  description: A Boolean value that, if true, indicates that data in the cluster is encrypted at rest.
+ returned: success
+ type: bool
+ sample: true|false
+restore_status:
+ description: A value that describes the status of a cluster restore action.
+ returned: success
+ type: dict
+ sample: {}
+hsm_status:
+ description: >
+ A value that reports whether the Amazon Redshift cluster has finished applying any hardware
+ security module (HSM) settings changes specified in a modify cluster command.
+ returned: success
+ type: dict
+ sample: {}
+cluster_snapshot_copy_status:
+ description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
+ returned: success
+ type: dict
+ sample: {}
+cluster_public_keys:
+ description: The public key for the cluster.
+ returned: success
+ type: str
+ sample: "ssh-rsa anjigfam Amazon-Redshift\n"
+cluster_nodes:
+ description: The nodes in the cluster.
+ returned: success
+ type: list
+ sample: [
+ {
+ "node_role": "LEADER",
+ "private_ip_address": "10.0.0.1",
+ "public_ip_address": "x.x.x.x"
+ },
+ {
+ "node_role": "COMPUTE-1",
+ "private_ip_address": "10.0.0.3",
+ "public_ip_address": "x.x.x.x"
+ }
+ ]
+elastic_ip_status:
+ description: The status of the elastic IP (EIP) address.
+ returned: success
+ type: dict
+ sample: {}
+cluster_revision_number:
+ description: The specific revision number of the database in the cluster.
+ returned: success
+ type: str
+ sample: "1231"
+tags:
+ description: The list of tags for the cluster.
+ returned: success
+ type: list
+ sample: []
+kms_key_id:
+ description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
+ returned: success
+ type: str
+ sample: ""
+enhanced_vpc_routing:
+ description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
+ returned: success
+ type: bool
+ sample: true|false
+iam_roles:
+ description: List of IAM roles attached to the cluster.
+ returned: success
+ type: list
+ sample: []
+'''
+
+import re
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_tags(tags_to_match, cluster):
+    # Every requested tag must be present on the cluster with a matching
+    # value; matching just one tag is not enough.
+    cluster_tags = dict((tag['Key'], tag['Value']) for tag in cluster['Tags'])
+    return all(cluster_tags.get(key) == value for key, value in tags_to_match.items())
+
+
+def find_clusters(conn, module, identifier=None, tags=None):
+
+ try:
+ cluster_paginator = conn.get_paginator('describe_clusters')
+ clusters = cluster_paginator.paginate().build_full_result()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch clusters.')
+
+ matched_clusters = []
+
+ if identifier is not None:
+ identifier_prog = re.compile('^' + identifier)
+
+ for cluster in clusters['Clusters']:
+
+ matched_identifier = True
+ if identifier:
+ matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
+
+ matched_tags = True
+ if tags:
+ matched_tags = match_tags(tags, cluster)
+
+ if matched_identifier and matched_tags:
+ matched_clusters.append(camel_dict_to_snake_dict(cluster))
+
+ return matched_clusters
+
+
+def main():
+
+ argument_spec = dict(
+ cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
+ tags=dict(type='dict')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ cluster_identifier = module.params.get('cluster_identifier')
+ cluster_tags = module.params.get('tags')
+
+ redshift = module.client('redshift')
+
+ results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
new file mode 100644
index 000000000..3c7ca31f5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redshift_subnet_group
+version_added: 1.0.0
+short_description: manage Redshift cluster subnet groups
+description:
+  - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+ state:
+ description:
+ - Specifies whether the subnet group should be present or absent.
+ default: 'present'
+ choices: ['present', 'absent' ]
+ type: str
+ name:
+ description:
+ - Cluster subnet group name.
+ required: true
+ aliases: ['group_name']
+ type: str
+ description:
+ description:
+ - Cluster subnet group description.
+ aliases: ['group_description']
+ type: str
+ subnets:
+ description:
+ - List of subnet IDs that make up the cluster subnet group.
+ - At least one subnet must be provided when creating a cluster subnet group.
+ aliases: ['group_subnets']
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+'''
+
+EXAMPLES = r'''
+- name: Create a Redshift subnet group
+ community.aws.redshift_subnet_group:
+ state: present
+ group_name: redshift-subnet
+ group_description: Redshift subnet
+ group_subnets:
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
+
+- name: Remove subnet group
+ community.aws.redshift_subnet_group:
+ state: absent
+ group_name: redshift-subnet
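+
+# A minimal sketch (subnet IDs are illustrative): running the module again
+# with a different subnet list updates the existing group in place.
+- name: Update the subnets of an existing subnet group
+  community.aws.redshift_subnet_group:
+    state: present
+    group_name: redshift-subnet
+    group_subnets:
+      - 'subnet-aaaaa'
+      - 'subnet-ccccc'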
+'''
+
+RETURN = r'''
+cluster_subnet_group:
+ description: A dictionary containing information about the Redshift subnet group.
+ returned: success
+ type: dict
+ contains:
+ name:
+ description: Name of the Redshift subnet group.
+      returned: when the cluster subnet group exists
+      type: str
+      sample: "redshift_subnet_group_name"
+    vpc_id:
+      description: Id of the VPC where the subnet is located.
+      returned: when the cluster subnet group exists
+      type: str
+      sample: "vpc-aabb1122"
+    description:
+      description: The description of the cluster subnet group.
+      returned: when the cluster subnet group exists
+      type: str
+      sample: Redshift subnet
+    subnet_ids:
+      description: The IDs of the subnets belonging to the Redshift subnet group.
+      returned: when the cluster subnet group exists
+ type: list
+ elements: str
+ sample:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_subnet_group(name):
+ try:
+ groups = client.describe_cluster_subnet_groups(
+ aws_retry=True,
+ ClusterSubnetGroupName=name,
+ )['ClusterSubnetGroups']
+ except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe subnet group")
+
+ if not groups:
+ return None
+
+ if len(groups) > 1:
+        module.fail_json(
+ msg="Found multiple matches for subnet group",
+ cluster_subnet_groups=camel_dict_to_snake_dict(groups),
+ )
+
+ # No support for managing tags yet, but make sure that we don't need to
+ # change the return value structure after it's been available in a release.
+ tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags'])
+
+ subnet_group = camel_dict_to_snake_dict(groups[0])
+
+ subnet_group['tags'] = tags
+ subnet_group['name'] = subnet_group['cluster_subnet_group_name']
+
+    subnet_ids = [s['subnet_identifier'] for s in subnet_group['subnets']]
+ subnet_group['subnet_ids'] = subnet_ids
+
+ return subnet_group
+
+
+def create_subnet_group(name, description, subnets):
+
+ if not subnets:
+ module.fail_json(msg='At least one subnet must be provided when creating a subnet group')
+
+ if module.check_mode:
+ return True
+
+ try:
+ if not description:
+ description = name
+ client.create_cluster_subnet_group(
+ aws_retry=True,
+ ClusterSubnetGroupName=name,
+ Description=description,
+ SubnetIds=subnets,
+ )
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create subnet group")
+
+
+def update_subnet_group(subnet_group, name, description, subnets):
+ update_params = dict()
+ if description and subnet_group['description'] != description:
+ update_params['Description'] = description
+ if subnets:
+ old_subnets = set(subnet_group['subnet_ids'])
+ new_subnets = set(subnets)
+ if old_subnets != new_subnets:
+ update_params['SubnetIds'] = list(subnets)
+
+ if not update_params:
+ return False
+
+ if module.check_mode:
+ return True
+
+ # Description is optional, SubnetIds is not
+ if 'SubnetIds' not in update_params:
+ update_params['SubnetIds'] = subnet_group['subnet_ids']
+
+ try:
+ client.modify_cluster_subnet_group(
+ aws_retry=True,
+ ClusterSubnetGroupName=name,
+ **update_params,
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update subnet group")
+
+ return True
+
+
+def delete_subnet_group(name):
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.delete_cluster_subnet_group(
+ aws_retry=True,
+ ClusterSubnetGroupName=name,
+ )
+ return True
+ except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'):
+ # AWS is "eventually consistent", cope with the race conditions where
+ # deletion hadn't completed when we ran describe
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete subnet group")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, aliases=['group_name']),
+ description=dict(required=False, aliases=['group_description']),
+ subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'),
+ )
+
+ global module
+ global client
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ subnets = module.params.get('subnets')
+
+ client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff())
+
+ subnet_group = get_subnet_group(name)
+ changed = False
+
+ if state == 'present':
+ if not subnet_group:
+ result = create_subnet_group(name, description, subnets)
+ changed |= result
+ else:
+ result = update_subnet_group(subnet_group, name, description, subnets)
+ changed |= result
+ subnet_group = get_subnet_group(name)
+ else:
+ if subnet_group:
+ result = delete_subnet_group(name)
+ changed |= result
+ subnet_group = None
+
+ compat_results = dict()
+ if subnet_group:
+ compat_results['group'] = dict(
+ name=subnet_group['name'],
+ vpc_id=subnet_group['vpc_id'],
+ )
+
+ module.exit_json(
+ changed=changed,
+ cluster_subnet_group=subnet_group,
+ **compat_results,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py b/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py
new file mode 100644
index 000000000..541a02b0f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py
@@ -0,0 +1,620 @@
+#!/usr/bin/python
+"""
+Copyright (c) 2017 Ansible Project
+GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket_info
+version_added: 1.0.0
+author:
+ - "Gerben Geijteman (@hyperized)"
+short_description: Lists S3 buckets in AWS
+description:
+ - Lists S3 buckets and details about those buckets.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info).
+ The usage did not change.
+options:
+ name:
+ description:
+ - Name of bucket to query.
+ type: str
+ default: ""
+ version_added: 1.4.0
+ name_filter:
+ description:
+      - Limits buckets to only buckets whose names contain the string in I(name_filter).
+ type: str
+ default: ""
+ version_added: 1.4.0
+ bucket_facts:
+ description:
+ - Retrieve requested S3 bucket detailed information.
+ - Each bucket_X option executes one API call, hence many options being set to C(true) will cause slower module execution.
+ - You can limit buckets by using the I(name) or I(name_filter) option.
+ suboptions:
+      bucket_accelerate_configuration:
+        description: Retrieve S3 accelerate configuration.
+        type: bool
+        default: False
+      bucket_location:
+        description: Retrieve S3 bucket location.
+        type: bool
+        default: False
+      bucket_replication:
+        description: Retrieve S3 bucket replication.
+        type: bool
+        default: False
+      bucket_acl:
+        description: Retrieve S3 bucket ACLs.
+        type: bool
+        default: False
+      bucket_logging:
+        description: Retrieve S3 bucket logging.
+        type: bool
+        default: False
+      bucket_request_payment:
+        description: Retrieve S3 bucket request payment.
+        type: bool
+        default: False
+      bucket_tagging:
+        description: Retrieve S3 bucket tagging.
+        type: bool
+        default: False
+      bucket_cors:
+        description: Retrieve S3 bucket CORS configuration.
+        type: bool
+        default: False
+      bucket_notification_configuration:
+        description: Retrieve S3 bucket notification configuration.
+        type: bool
+        default: False
+      bucket_encryption:
+        description: Retrieve S3 bucket encryption.
+        type: bool
+        default: False
+      bucket_ownership_controls:
+        description:
+          - Retrieve S3 ownership controls.
+        type: bool
+        default: False
+      bucket_website:
+        description: Retrieve S3 bucket website.
+        type: bool
+        default: False
+      bucket_policy:
+        description: Retrieve S3 bucket policy.
+        type: bool
+        default: False
+      bucket_policy_status:
+        description: Retrieve S3 bucket policy status.
+        type: bool
+        default: False
+      bucket_lifecycle_configuration:
+        description: Retrieve S3 bucket lifecycle configuration.
+        type: bool
+        default: False
+      public_access_block:
+        description: Retrieve S3 bucket public access block.
+        type: bool
+        default: False
+ type: dict
+ version_added: 1.4.0
+ transform_location:
+ description:
+ - S3 bucket location for default us-east-1 is normally reported as C(null).
+ - Setting this option to C(true) will return C(us-east-1) instead.
+ - Affects only queries with I(bucket_facts=true) and I(bucket_location=true).
+ type: bool
+ default: False
+ version_added: 1.4.0
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Note: Only AWS S3 is currently supported
+
+# Lists all S3 buckets
+- community.aws.s3_bucket_info:
+ register: result
+
+# Retrieve detailed bucket information
+- community.aws.s3_bucket_info:
+ # Show only buckets with name matching
+ name_filter: your.testing
+ # Choose facts to retrieve
+ bucket_facts:
+ # bucket_accelerate_configuration: true
+ bucket_acl: true
+ bucket_cors: true
+ bucket_encryption: true
+ # bucket_lifecycle_configuration: true
+ bucket_location: true
+ # bucket_logging: true
+ # bucket_notification_configuration: true
+ # bucket_ownership_controls: true
+ # bucket_policy: true
+ # bucket_policy_status: true
+ # bucket_replication: true
+ # bucket_request_payment: true
+ # bucket_tagging: true
+ # bucket_website: true
+ # public_access_block: true
+ transform_location: true
+ register: result
+
+# Print out result
+- name: List buckets
+ ansible.builtin.debug:
+ msg: "{{ result['buckets'] }}"
+'''
+
+RETURN = '''
+bucket_list:
+ description: "List of buckets"
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: Bucket name.
+ returned: always
+ type: str
+ sample: a-testing-bucket-name
+ creation_date:
+ description: Bucket creation date timestamp.
+ returned: always
+ type: str
+ sample: "2021-01-21T12:44:10+00:00"
+ public_access_block:
+ description: Bucket public access block configuration.
+ returned: when I(bucket_facts=true) and I(public_access_block=true)
+ type: complex
+ contains:
+ PublicAccessBlockConfiguration:
+ description: PublicAccessBlockConfiguration data.
+ returned: when PublicAccessBlockConfiguration is defined for the bucket
+ type: complex
+ contains:
+ BlockPublicAcls:
+ description: BlockPublicAcls setting value.
+ type: bool
+ sample: true
+ BlockPublicPolicy:
+ description: BlockPublicPolicy setting value.
+ type: bool
+ sample: true
+ IgnorePublicAcls:
+ description: IgnorePublicAcls setting value.
+ type: bool
+ sample: true
+ RestrictPublicBuckets:
+ description: RestrictPublicBuckets setting value.
+ type: bool
+ sample: true
+ bucket_name_filter:
+ description: String used to limit buckets. See I(name_filter).
+ returned: when I(name_filter) is defined
+ type: str
+ sample: filter-by-this-string
+ bucket_acl:
+ description: Bucket ACL configuration.
+ returned: when I(bucket_facts=true) and I(bucket_acl=true)
+ type: complex
+ contains:
+ Grants:
+ description: List of ACL grants.
+ type: list
+ sample: []
+ Owner:
+ description: Bucket owner information.
+ type: complex
+ contains:
+ DisplayName:
+ description: Bucket owner user display name.
+ returned: always
+ type: str
+ sample: username
+ ID:
+ description: Bucket owner user ID.
+ returned: always
+ type: str
+ sample: 123894e509349etc
+ bucket_cors:
+ description: Bucket CORS configuration.
+ returned: when I(bucket_facts=true) and I(bucket_cors=true)
+ type: complex
+ contains:
+ CORSRules:
+ description: Bucket CORS configuration.
+ returned: when CORS rules are defined for the bucket
+ type: list
+ sample: []
+ bucket_encryption:
+ description: Bucket encryption configuration.
+ returned: when I(bucket_facts=true) and I(bucket_encryption=true)
+ type: complex
+ contains:
+ ServerSideEncryptionConfiguration:
+ description: ServerSideEncryptionConfiguration configuration.
+ returned: when encryption is enabled on the bucket
+ type: complex
+ contains:
+ Rules:
+            description: List of applied encryption rules.
+ returned: when encryption is enabled on the bucket
+ type: list
+ sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False }
+ bucket_lifecycle_configuration:
+ description: Bucket lifecycle configuration settings.
+ returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true)
+ type: complex
+ contains:
+ Rules:
+ description: List of lifecycle management rules.
+ returned: when lifecycle configuration is present
+ type: list
+ sample: [{ "Status": "Enabled", "ID": "example-rule" }]
+ bucket_location:
+ description: Bucket location.
+ returned: when I(bucket_facts=true) and I(bucket_location=true)
+ type: complex
+ contains:
+ LocationConstraint:
+ description: AWS region.
+ returned: always
+ type: str
+ sample: us-east-2
+ bucket_logging:
+ description: Server access logging configuration.
+ returned: when I(bucket_facts=true) and I(bucket_logging=true)
+ type: complex
+ contains:
+ LoggingEnabled:
+ description: Server access logging configuration.
+ returned: when server access logging is defined for the bucket
+ type: complex
+ contains:
+ TargetBucket:
+ description: Target bucket name.
+ returned: always
+ type: str
+ sample: logging-bucket-name
+ TargetPrefix:
+ description: Prefix in target bucket.
+ returned: always
+ type: str
+ sample: ""
+ bucket_notification_configuration:
+ description: Bucket notification settings.
+ returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true)
+ type: complex
+ contains:
+ TopicConfigurations:
+ description: List of notification events configurations.
+ returned: when at least one notification is configured
+ type: list
+ sample: []
+ bucket_ownership_controls:
+      description: Preferred object ownership settings.
+ returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true)
+ type: complex
+ contains:
+ OwnershipControls:
+ description: Object ownership settings.
+ returned: when ownership controls are defined for the bucket
+ type: complex
+ contains:
+ Rules:
+ description: List of ownership rules.
+ returned: when ownership rule is defined
+ type: list
+ sample: [{ "ObjectOwnership:": "ObjectWriter" }]
+ bucket_policy:
+ description: Bucket policy contents.
+ returned: when I(bucket_facts=true) and I(bucket_policy=true)
+ type: str
+ sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}}]}'
+ bucket_policy_status:
+ description: Status of bucket policy.
+ returned: when I(bucket_facts=true) and I(bucket_policy_status=true)
+ type: complex
+ contains:
+ PolicyStatus:
+ description: Status of bucket policy.
+ returned: when bucket policy is present
+ type: complex
+ contains:
+ IsPublic:
+ description: Report bucket policy public status.
+ returned: when bucket policy is present
+ type: bool
+ sample: True
+ bucket_replication:
+ description: Replication configuration settings.
+ returned: when I(bucket_facts=true) and I(bucket_replication=true)
+ type: complex
+ contains:
+ Role:
+ description: IAM role used for replication.
+ returned: when replication rule is defined
+ type: str
+ sample: "arn:aws:iam::123:role/example-role"
+ Rules:
+ description: List of replication rules.
+ returned: when replication rule is defined
+ type: list
+ sample: [{ "ID": "rule-1", "Filter": "{}" }]
+ bucket_request_payment:
+ description: Requester pays setting.
+ returned: when I(bucket_facts=true) and I(bucket_request_payment=true)
+ type: complex
+ contains:
+ Payer:
+ description: Current payer.
+ returned: always
+ type: str
+ sample: BucketOwner
+ bucket_tagging:
+ description: Bucket tags.
+ returned: when I(bucket_facts=true) and I(bucket_tagging=true)
+ type: dict
+ sample: { "Tag1": "Value1", "Tag2": "Value2" }
+ bucket_website:
+ description: Static website hosting.
+ returned: when I(bucket_facts=true) and I(bucket_website=true)
+ type: complex
+ contains:
+ ErrorDocument:
+ description: Object serving as HTTP error page.
+ returned: when static website hosting is enabled
+ type: dict
+ sample: { "Key": "error.html" }
+ IndexDocument:
+ description: Object serving as HTTP index page.
+ returned: when static website hosting is enabled
+ type: dict
+ sample: { "Suffix": "error.html" }
+ RedirectAllRequestsTo:
+          description: Website redirect settings.
+          returned: when request redirection is configured
+ type: complex
+ contains:
+ HostName:
+ description: Hostname to redirect.
+ returned: always
+ type: str
+ sample: www.example.com
+ Protocol:
+ description: Protocol used for redirect.
+ returned: always
+ type: str
+ sample: https
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_bucket_list(module, connection, name="", name_filter=""):
+ """
+    Return the result of list_buckets, JSON encoded.
+    Filter to buckets whose name matches 'name' exactly, or contains 'name_filter', if defined.
+    :param module: AnsibleAWSModule instance
+    :param connection: boto3 S3 client
+    :return: list of bucket dicts (name + creation date)
+ """
+ buckets = []
+ filtered_buckets = []
+ final_buckets = []
+
+ # Get all buckets
+ try:
+ buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+ module.fail_json_aws(err_code, msg="Failed to list buckets")
+
+ # Filter buckets if requested
+ if name_filter:
+ for bucket in buckets:
+ if name_filter in bucket['name']:
+ filtered_buckets.append(bucket)
+ elif name:
+ for bucket in buckets:
+ if name == bucket['name']:
+ filtered_buckets.append(bucket)
+
+ # Return proper list (filtered or all)
+ if name or name_filter:
+ final_buckets = filtered_buckets
+ else:
+ final_buckets = buckets
+ return final_buckets
+
+
+def get_buckets_facts(connection, buckets, requested_facts, transform_location):
+ """
+    Retrieve additional information about S3 buckets
+ """
+ full_bucket_list = []
+    # Iterate over all buckets and append retrieved facts to each bucket
+ for bucket in buckets:
+ bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location))
+ full_bucket_list.append(bucket)
+
+ return full_bucket_list
+
+
+def get_bucket_details(connection, name, requested_facts, transform_location):
+ """
+    Execute all enabled S3 API get calls for the selected bucket
+ """
+ all_facts = {}
+
+ for key in requested_facts:
+ if requested_facts[key]:
+ if key == 'bucket_location':
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_location(name, connection, transform_location)
+                # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+ elif key == 'bucket_tagging':
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_tagging(name, connection)
+                # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+ else:
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_property(name, connection, key)
+                # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+
+ return all_facts
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_location(name, connection, transform_location=False):
+ """
+ Get bucket location and optionally transform 'null' to 'us-east-1'
+ """
+ data = connection.get_bucket_location(Bucket=name)
+
+ # Replace 'null' with 'us-east-1'?
+ if transform_location:
+ try:
+ if not data['LocationConstraint']:
+ data['LocationConstraint'] = 'us-east-1'
+ except KeyError:
+ pass
+ # Strip response metadata (not needed)
+ data.pop('ResponseMetadata', None)
+ return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_tagging(name, connection):
+ """
+    Get bucket tags and transform them using the `boto3_tag_list_to_ansible_dict` function
+ """
+ data = connection.get_bucket_tagging(Bucket=name)
+
+ try:
+ bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet'])
+ return bucket_tags
+ except KeyError:
+ # Strip response metadata (not needed)
+ data.pop('ResponseMetadata', None)
+ return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_property(name, connection, get_api_name):
+ """
+ Get bucket property
+ """
+ api_call = "get_" + get_api_name
+ api_function = getattr(connection, api_call)
+ data = api_function(Bucket=name)
+
+ # Strip response metadata (not needed)
+ data.pop('ResponseMetadata', None)
+ return data
+
+
+def main():
+ """
+    Get a list of S3 buckets, optionally with detailed facts.
+ """
+ argument_spec = dict(
+ name=dict(type='str', default=""),
+ name_filter=dict(type='str', default=""),
+ bucket_facts=dict(type='dict', options=dict(
+ bucket_accelerate_configuration=dict(type='bool', default=False),
+ bucket_acl=dict(type='bool', default=False),
+ bucket_cors=dict(type='bool', default=False),
+ bucket_encryption=dict(type='bool', default=False),
+ bucket_lifecycle_configuration=dict(type='bool', default=False),
+ bucket_location=dict(type='bool', default=False),
+ bucket_logging=dict(type='bool', default=False),
+ bucket_notification_configuration=dict(type='bool', default=False),
+ bucket_ownership_controls=dict(type='bool', default=False),
+ bucket_policy=dict(type='bool', default=False),
+ bucket_policy_status=dict(type='bool', default=False),
+ bucket_replication=dict(type='bool', default=False),
+ bucket_request_payment=dict(type='bool', default=False),
+ bucket_tagging=dict(type='bool', default=False),
+ bucket_website=dict(type='bool', default=False),
+ public_access_block=dict(type='bool', default=False),
+ )),
+ transform_location=dict(type='bool', default=False)
+ )
+
+ # Ensure we have an empty dict
+ result = {}
+
+ # Define mutually exclusive options
+ mutually_exclusive = [
+ ['name', 'name_filter']
+ ]
+
+    # AnsibleAWSModule automatically merges in the common AWS argument spec
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+
+ # Get parameters
+ name = module.params.get("name")
+ name_filter = module.params.get("name_filter")
+ requested_facts = module.params.get("bucket_facts")
+ transform_location = module.params.get("bucket_facts")
+
+ # Set up connection
+    try:
+        connection = module.client('s3')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+ module.fail_json_aws(err_code, msg='Failed to connect to AWS')
+
+ # Get basic bucket list (name + creation date)
+ bucket_list = get_bucket_list(module, connection, name, name_filter)
+
+ # Add information about name/name_filter to result
+ if name:
+ result['bucket_name'] = name
+ elif name_filter:
+ result['bucket_name_filter'] = name_filter
+
+ # Gather detailed information about buckets if requested
+ bucket_facts = module.params.get("bucket_facts")
+ if bucket_facts:
+ result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
+ else:
+ result['buckets'] = bucket_list
+
+ module.exit_json(msg="Retrieved s3 info.", **result)
+
+
+# MAIN
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
new file mode 100644
index 000000000..645ca6989
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2021, Ansible Project
+# (c) 2019, XLAB d.o.o <www.xlab.si>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_bucket_notification
+version_added: 1.0.0
+short_description: Creates, updates or deletes S3 bucket notifications targeting Lambda functions, SNS or SQS
+description:
+ - This module supports the creation, updates and deletions of S3 bucket notification profiles targeting
+ either Lambda functions, SNS topics or SQS queues.
+  - The target for the notifications must already exist. For Lambda functions, use M(community.aws.lambda)
+    to manage the Lambda function itself, M(community.aws.lambda_alias)
+    to manage function aliases, and M(community.aws.lambda_policy) to modify Lambda permissions.
+    For SNS or SQS targets, use M(community.aws.sns_topic) or M(community.aws.sqs_queue).
+notes:
+  - If using a Lambda function as the target, a Lambda policy allowing C(lambda:InvokeFunction)
+    for the notification is also needed; use M(community.aws.lambda_policy) to manage it.
+author:
+ - XLAB d.o.o. (@xlab-si)
+ - Aljaz Kosir (@aljazkosir)
+ - Miha Plesko (@miha-plesko)
+ - Mark Woolley (@marknet15)
+options:
+ event_name:
+ description:
+ - Unique name for event notification on bucket.
+ required: true
+ type: str
+ bucket_name:
+ description:
+ - S3 bucket name.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ queue_arn:
+ description:
+ - The ARN of the SQS queue.
+ - Mutually exclusive with I(topic_arn) and I(lambda_function_arn).
+ type: str
+ version_added: 3.2.0
+ topic_arn:
+ description:
+ - The ARN of the SNS topic.
+ - Mutually exclusive with I(queue_arn) and I(lambda_function_arn).
+ type: str
+ version_added: 3.2.0
+ lambda_function_arn:
+ description:
+ - The ARN of the lambda function.
+ - Mutually exclusive with I(queue_arn) and I(topic_arn).
+ aliases: ['function_arn']
+ type: str
+ lambda_alias:
+ description:
+ - Name of the Lambda function alias.
+ - Mutually exclusive with I(lambda_version).
+ type: str
+ lambda_version:
+ description:
+ - Version of the Lambda function.
+ - Mutually exclusive with I(lambda_alias).
+ type: int
+ default: 0
+ events:
+ description:
+ - Events that will be triggering a notification. You can select multiple events to send
+ to the same destination, you can set up different events to send to different destinations,
+ and you can set up a prefix or suffix for an event. However, for each bucket,
+ individual events cannot have multiple configurations with overlapping prefixes or
+ suffixes that could match the same object key.
+ - Required when I(state=present).
+ choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
+ 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
+ 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
+ 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
+ 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+ type: list
+ elements: str
+ default: []
+ prefix:
+ description:
+ - Optional prefix to limit the notifications to objects with keys that start with matching
+ characters.
+ type: str
+ default: ''
+ suffix:
+ description:
+ - Optional suffix to limit the notifications to objects with keys that end with matching
+ characters.
+ type: str
+ default: ''
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+---
+# Examples adding notification target configs to an S3 bucket
+- name: Setup bucket event notification to a Lambda function
+ community.aws.s3_bucket_notification:
+ state: present
+ event_name: on_file_add_or_remove
+ bucket_name: test-bucket
+ lambda_function_arn: arn:aws:lambda:us-east-2:123456789012:function:test-lambda
+ events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+ prefix: images/
+ suffix: .jpg
+
+- name: Setup bucket event notification to SQS
+ community.aws.s3_bucket_notification:
+ state: present
+ event_name: on_file_add_or_remove
+ bucket_name: test-bucket
+ queue_arn: arn:aws:sqs:us-east-2:123456789012:test-queue
+ events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+ prefix: images/
+ suffix: .jpg
+
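+# Illustrative SNS target example; the topic ARN is a placeholder and the topic
+# (see community.aws.sns_topic) must already exist.
+- name: Setup bucket event notification to SNS
+  community.aws.s3_bucket_notification:
+    state: present
+    event_name: on_file_add_or_remove
+    bucket_name: test-bucket
+    topic_arn: arn:aws:sns:us-east-2:123456789012:test-topic
+    events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+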
+# Example removing an event notification
+- name: Remove event notification
+ community.aws.s3_bucket_notification:
+ state: absent
+ event_name: on_file_add_or_remove
+ bucket_name: test-bucket
+'''
+
+RETURN = r'''
+notification_configuration:
+  description: Dictionary of currently applied notification configurations.
+ returned: success
+ type: complex
+ contains:
+ lambda_function_configurations:
+ description:
+ - List of current Lambda function notification configurations applied to the bucket.
+ type: list
+ queue_configurations:
+ description:
+ - List of current SQS notification configurations applied to the bucket.
+ type: list
+ topic_configurations:
+ description:
+ - List of current SNS notification configurations applied to the bucket.
+ type: list
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # will be protected by AnsibleAWSModule
+
+
+class AmazonBucket:
+ def __init__(self, module, client):
+ self.module = module
+ self.client = client
+ self.bucket_name = module.params['bucket_name']
+ self.check_mode = module.check_mode
+ self._full_config_cache = None
+
+ def full_config(self):
+ if self._full_config_cache is None:
+ self._full_config_cache = dict(
+ QueueConfigurations=[],
+ TopicConfigurations=[],
+ LambdaFunctionConfigurations=[]
+ )
+
+ try:
+ config_lookup = self.client.get_bucket_notification_configuration(
+ Bucket=self.bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json(msg='{0}'.format(e))
+
+ # Handle different event targets
+ if config_lookup.get('QueueConfigurations'):
+ for queue_config in config_lookup.get('QueueConfigurations'):
+ self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config))
+
+ if config_lookup.get('TopicConfigurations'):
+ for topic_config in config_lookup.get('TopicConfigurations'):
+ self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config))
+
+ if config_lookup.get('LambdaFunctionConfigurations'):
+ for function_config in config_lookup.get('LambdaFunctionConfigurations'):
+ self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config))
+
+ return self._full_config_cache
+
+ def current_config(self, config_name):
+ # Iterate through configs and get current event config
+ for target_configs in self.full_config():
+ for config in self.full_config()[target_configs]:
+ if config.raw['Id'] == config_name:
+ return config
+
+ def apply_config(self, desired):
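+        # Rebuild the complete notification configuration: keep every existing
+        # config except the one whose Id matches the desired config, then append
+        # the desired config to the list matching the supplied ARN parameter.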
+ configs = dict(
+ QueueConfigurations=[],
+ TopicConfigurations=[],
+ LambdaFunctionConfigurations=[]
+ )
+
+ # Iterate through existing configs then add the desired config
+ for target_configs in self.full_config():
+ for config in self.full_config()[target_configs]:
+ if config.name != desired.raw['Id']:
+ configs[target_configs].append(config.raw)
+
+ if self.module.params.get('queue_arn'):
+ configs['QueueConfigurations'].append(desired.raw)
+ if self.module.params.get('topic_arn'):
+ configs['TopicConfigurations'].append(desired.raw)
+ if self.module.params.get('lambda_function_arn'):
+ configs['LambdaFunctionConfigurations'].append(desired.raw)
+
+ self._upload_bucket_config(configs)
+ return configs
+
+ def delete_config(self, desired):
+ configs = dict(
+ QueueConfigurations=[],
+ TopicConfigurations=[],
+ LambdaFunctionConfigurations=[]
+ )
+
+ # Iterate through existing configs omitting specified config
+ for target_configs in self.full_config():
+ for config in self.full_config()[target_configs]:
+ if config.name != desired.raw['Id']:
+ configs[target_configs].append(config.raw)
+
+ self._upload_bucket_config(configs)
+ return configs
+
+ def _upload_bucket_config(self, configs):
+ api_params = dict(
+ Bucket=self.bucket_name,
+ NotificationConfiguration=dict()
+ )
+
+ # Iterate through available configs
+ for target_configs in configs:
+ if len(configs[target_configs]) > 0:
+ api_params['NotificationConfiguration'][target_configs] = configs[target_configs]
+
+ if not self.check_mode:
+ try:
+ self.client.put_bucket_notification_configuration(**api_params)
+ except (ClientError, BotoCoreError) as e:
+ self.module.fail_json(msg='{0}'.format(e))
+
+
+class Config:
+ def __init__(self, content):
+ self._content = content
+ self.name = content.get('Id')
+
+ @property
+ def raw(self):
+ return self._content
+
+ def __eq__(self, other):
+ if other:
+ return self.raw == other.raw
+ return False
+
+ @classmethod
+ def from_params(cls, **params):
+ """Generate bucket notification params for target"""
+
+ bucket_event_params = dict(
+ Id=params['event_name'],
+ Events=sorted(params['events']),
+ Filter=dict(
+ Key=dict(
+ FilterRules=[
+ dict(
+ Name='Prefix',
+ Value=params['prefix']
+ ),
+ dict(
+ Name='Suffix',
+ Value=params['suffix']
+ )
+ ]
+ )
+ )
+ )
+
+ # Handle different event targets
+ if params.get('queue_arn'):
+ bucket_event_params['QueueArn'] = params['queue_arn']
+ if params.get('topic_arn'):
+ bucket_event_params['TopicArn'] = params['topic_arn']
+ if params.get('lambda_function_arn'):
+ function_arn = params['lambda_function_arn']
+
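+            # A version or alias, if supplied, is appended to the ARN as a
+            # qualifier, producing e.g. (illustrative)
+            # arn:aws:lambda:us-east-2:123456789012:function:test-lambda:PROD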
+ qualifier = None
+ if params['lambda_version'] > 0:
+ qualifier = str(params['lambda_version'])
+ elif params['lambda_alias']:
+ qualifier = str(params['lambda_alias'])
+ if qualifier:
+ params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+ bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn']
+
+ return cls(bucket_event_params)
+
+ @classmethod
+ def from_api(cls, config):
+ return cls(config)
+
+
+def setup_module_object():
+ event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
+ 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
+ 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
+ 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
+ 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ event_name=dict(required=True),
+ lambda_function_arn=dict(aliases=['function_arn']),
+ queue_arn=dict(type='str'),
+ topic_arn=dict(type='str'),
+ bucket_name=dict(required=True),
+ events=dict(type='list', default=[], choices=event_types, elements='str'),
+ prefix=dict(default=''),
+ suffix=dict(default=''),
+ lambda_alias=dict(),
+ lambda_version=dict(type='int', default=0),
+ )
+
+ mutually_exclusive = [
+ ['queue_arn', 'topic_arn', 'lambda_function_arn'],
+ ['lambda_alias', 'lambda_version']
+ ]
+
+ return AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_if=[['state', 'present', ['events']]]
+ )
+
+
+def main():
+ module = setup_module_object()
+
+ client = module.client('s3')
+ bucket = AmazonBucket(module, client)
+ current = bucket.current_config(module.params['event_name'])
+ desired = Config.from_params(**module.params)
+
+ notification_configs = dict(
+ QueueConfigurations=[],
+ TopicConfigurations=[],
+ LambdaFunctionConfigurations=[]
+ )
+
+ for target_configs in bucket.full_config():
+ for cfg in bucket.full_config()[target_configs]:
+ notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw))
+
+ state = module.params['state']
+ updated_configuration = dict()
+ changed = False
+
+ if state == 'present':
+ if current != desired:
+ updated_configuration = bucket.apply_config(desired)
+ changed = True
+ elif state == 'absent':
+ if current:
+ updated_configuration = bucket.delete_config(desired)
+ changed = True
+
+ for target_configs in updated_configuration:
+ notification_configs[target_configs] = []
+ for cfg in updated_configuration.get(target_configs, list()):
+ notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg))
+
+ module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(
+ notification_configs))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_cors.py b/ansible_collections/community/aws/plugins/modules/s3_cors.py
new file mode 100644
index 000000000..753e395f9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_cors.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_cors
+version_added: 1.0.0
+short_description: Manage CORS for S3 buckets in AWS
+description:
+ - Manage CORS for S3 buckets in AWS.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_cors).
+ The usage did not change.
+author:
+ - "Oyvind Saltvik (@fivethreeo)"
+options:
+ name:
+ description:
+ - Name of the S3 bucket.
+ required: true
+ type: str
+ rules:
+ description:
+ - Cors rules to put on the S3 bucket.
+ type: list
+ elements: dict
+ state:
+ description:
+ - Create or remove cors on the S3 bucket.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple cors for s3 bucket
+- community.aws.s3_cors:
+ name: mys3bucket
+ state: present
+ rules:
+ - allowed_origins:
+ - http://www.example.com/
+ allowed_methods:
+ - GET
+ - POST
+ allowed_headers:
+ - Authorization
+ expose_headers:
+ - x-amz-server-side-encryption
+ - x-amz-request-id
+ max_age_seconds: 30000
+
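+# Illustrative read-only rule; the wildcard origin and values are examples only
+- community.aws.s3_cors:
+    name: mys3bucket
+    state: present
+    rules:
+      - allowed_origins:
+          - "*"
+        allowed_methods:
+          - GET
+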
+# Remove cors for s3 bucket
+- community.aws.s3_cors:
+ name: mys3bucket
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+  description: Whether a change was made to the rules.
+ returned: always
+ type: bool
+ sample: true
+name:
+  description: Name of the bucket.
+ returned: always
+ type: str
+ sample: 'bucket-name'
+rules:
+  description: List of current rules.
+ returned: always
+ type: list
+ sample: [
+ {
+ "allowed_headers": [
+ "Authorization"
+ ],
+ "allowed_methods": [
+ "GET"
+ ],
+ "allowed_origins": [
+ "*"
+ ],
+ "max_age_seconds": 30000
+ }
+ ]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
+
+
+def create_or_update_bucket_cors(connection, module):
+
+ name = module.params.get("name")
+ rules = module.params.get("rules", [])
+ changed = False
+
+ try:
+ current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
+ except ClientError:
+ current_camel_rules = []
+
+ new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)
+    # compare_policies() normalizes both rule sets and returns True if they differ
+ if compare_policies(new_camel_rules, current_camel_rules):
+ changed = True
+
+ if changed:
+ try:
+ cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))
+
+ module.exit_json(changed=changed, name=name, rules=rules)
+
+
+def destroy_bucket_cors(connection, module):
+
+ name = module.params.get("name")
+ changed = False
+
+ try:
+ cors = connection.delete_bucket_cors(Bucket=name)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ rules=dict(type='list', elements='dict'),
+ state=dict(type='str', choices=['present', 'absent'], required=True)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ client = module.client('s3')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_bucket_cors(client, module)
+ elif state == 'absent':
+ destroy_bucket_cors(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
new file mode 100644
index 000000000..660bca869
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_lifecycle
+version_added: 1.0.0
+short_description: Manage S3 bucket lifecycle rules in AWS
+description:
+ - Manage S3 bucket lifecycle rules in AWS.
+author: "Rob White (@wimnat)"
+notes:
+ - If specifying expiration time as days then transition time must also be specified in days.
+ - If specifying expiration time as a date then transition time must also be specified as a date.
+options:
+ name:
+ description:
+ - Name of the S3 bucket.
+ required: true
+ type: str
+ abort_incomplete_multipart_upload_days:
+ description:
+ - Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload.
+ type: int
+ version_added: 2.2.0
+ expiration_date:
+ description:
+ - Indicates the lifetime of the objects that are subject to the rule by the date they will expire.
+ - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified.
+      - This cannot be specified with I(expire_object_delete_marker).
+ type: str
+ expiration_days:
+ description:
+ - Indicates the lifetime, in days, of the objects that are subject to the rule.
+ - The value must be a non-zero positive integer.
+      - This cannot be specified with I(expire_object_delete_marker).
+ type: int
+ expire_object_delete_marker:
+ description:
+ - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions.
+ - If set to C(true), the delete marker will be expired; if set to C(false) the policy takes no action.
+ - This cannot be specified with I(expiration_days) or I(expiration_date).
+ type: bool
+ version_added: 2.2.0
+ prefix:
+ description:
+ - Prefix identifying one or more objects to which the rule applies.
+ - If no prefix is specified, the rule will apply to the whole bucket.
+ type: str
+ purge_transitions:
+ description:
+ - Whether to replace all the current transition(s) with the new transition(s).
+      - When C(false), the provided transition(s) will be added, replacing transitions
+        with the same I(storage_class). When C(true), existing transitions will be removed
+        and replaced with the new transition(s).
+ default: true
+ type: bool
+ noncurrent_version_expiration_days:
+ description:
+ - The number of days after which non-current versions should be deleted.
+ - Must be set if I(noncurrent_version_keep_newer) is set.
+ required: false
+ type: int
+ noncurrent_version_keep_newer:
+ description:
+ - The minimum number of non-current versions to retain.
+      - Requires C(botocore >= 1.23.12).
+      - Requires I(noncurrent_version_expiration_days).
+ required: false
+ type: int
+ version_added: 5.3.0
+ noncurrent_version_storage_class:
+ description:
+ - The storage class to which non-current versions are transitioned.
+ default: glacier
+ choices: ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ required: false
+ type: str
+ noncurrent_version_transition_days:
+ description:
+ - The number of days after which non-current versions will be transitioned
+ to the storage class specified in I(noncurrent_version_storage_class).
+ required: false
+ type: int
+ noncurrent_version_transitions:
+ description:
+ - A list of transition behaviors to be applied to noncurrent versions for the rule.
+      - Each storage class may be used only once. Each transition behavior contains these elements:
+        I(transition_days) and I(storage_class).
+ type: list
+ elements: dict
+ rule_id:
+ description:
+ - Unique identifier for the rule.
+ - The value cannot be longer than 255 characters.
+ - A unique value for the rule will be generated if no value is provided.
+ type: str
+ state:
+ description:
+ - Create or remove the lifecycle rule.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ status:
+ description:
+ - If C(enabled), the rule is currently being applied.
+ - If C(disabled), the rule is not currently being applied.
+ default: enabled
+ choices: [ 'enabled', 'disabled' ]
+ type: str
+ storage_class:
+ description:
+ - The storage class to transition to.
+ default: glacier
+ choices: [ 'glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ type: str
+ transition_date:
+ description:
+ - Indicates the lifetime of the objects that are subject to the rule by the date they
+ will transition to a different storage class.
+ - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must
+ be specified.
+      - If I(transition_days) is not specified, this parameter is required.
+ type: str
+ transition_days:
+ description:
+ - Indicates when, in days, an object transitions to a different storage class.
+ - If I(transition_date) is not specified, this parameter is required.
+ type: int
+ transitions:
+ description:
+ - A list of transition behaviors to be applied to the rule.
+      - Each storage class may be used only once. Each transition behavior may contain these elements:
+        I(transition_days), I(transition_date) and I(storage_class).
+ type: list
+ elements: dict
+ wait:
+ description:
+ - Wait for the configuration to complete before returning.
+ version_added: 1.5.0
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+ community.aws.s3_lifecycle:
+ name: mybucket
+ expiration_days: 30
+ prefix: logs/
+ status: enabled
+ state: present
+
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+ community.aws.s3_lifecycle:
+ name: mybucket
+ transition_days: 7
+ expiration_days: 90
+ prefix: logs/
+ status: enabled
+ state: present
+
+# Note that midnight GMT must be specified.
+# Be sure to quote your date strings
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030.
+ community.aws.s3_lifecycle:
+ name: mybucket
+ transition_date: "2020-12-30T00:00:00.000Z"
+ expiration_date: "2030-12-30T00:00:00.000Z"
+ prefix: logs/
+ status: enabled
+ state: present
+
+- name: Disable the rule created above
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ status: disabled
+ state: present
+
+- name: Delete the lifecycle rule created above
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ state: absent
+
+- name: Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: backups/
+ storage_class: standard_ia
+ transition_days: 31
+ state: present
+ status: enabled
+
+- name: Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
+ community.aws.s3_lifecycle:
+ name: mybucket
+ prefix: logs/
+ state: present
+ status: enabled
+ transitions:
+ - transition_days: 30
+ storage_class: standard_ia
+ - transition_days: 90
+ storage_class: glacier
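+
+# Illustrative noncurrent-version rule; the day counts are examples and
+# noncurrent_version_keep_newer requires botocore >= 1.23.12.
+- name: Expire noncurrent versions after 60 days, keeping the two newest
+  community.aws.s3_lifecycle:
+    name: mybucket
+    prefix: logs/
+    noncurrent_version_expiration_days: 60
+    noncurrent_version_keep_newer: 2
+    state: present
+    status: enabled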
+'''
+
+from copy import deepcopy
+import datetime
+import time
+
+try:
+ from dateutil import parser as date_parser
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAwsModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def parse_date(date):
+ if date is None:
+ return None
+ try:
+ if HAS_DATEUTIL:
+ return date_parser.parse(date)
+ else:
+            # Very simplistic fallback; accepts only the exact documented format
+ return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
+ except ValueError:
+ return None
+
+
+def fetch_rules(client, module, name):
+ # Get the bucket's current lifecycle rules
+ try:
+ current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name)
+ current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules'])
+ except is_boto3_error_code('NoSuchLifecycleConfiguration'):
+ current_lifecycle_rules = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ return current_lifecycle_rules
+
+
+def build_rule(client, module):
+ name = module.params.get("name")
+ abort_incomplete_multipart_upload_days = module.params.get("abort_incomplete_multipart_upload_days")
+ expiration_date = parse_date(module.params.get("expiration_date"))
+ expiration_days = module.params.get("expiration_days")
+ expire_object_delete_marker = module.params.get("expire_object_delete_marker")
+ noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days")
+ noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days")
+ noncurrent_version_transitions = module.params.get("noncurrent_version_transitions")
+ noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class")
+ noncurrent_version_keep_newer = module.params.get("noncurrent_version_keep_newer")
+ prefix = module.params.get("prefix") or ""
+ rule_id = module.params.get("rule_id")
+ status = module.params.get("status")
+ storage_class = module.params.get("storage_class")
+ transition_date = parse_date(module.params.get("transition_date"))
+ transition_days = module.params.get("transition_days")
+ transitions = module.params.get("transitions")
+ purge_transitions = module.params.get("purge_transitions")
+
+ rule = dict(Filter=dict(Prefix=prefix), Status=status.title())
+ if rule_id is not None:
+ rule['ID'] = rule_id
+
+ if abort_incomplete_multipart_upload_days:
+ rule['AbortIncompleteMultipartUpload'] = {
+ 'DaysAfterInitiation': abort_incomplete_multipart_upload_days
+ }
+
+ # Create expiration
+ if expiration_days is not None:
+ rule['Expiration'] = dict(Days=expiration_days)
+ elif expiration_date is not None:
+ rule['Expiration'] = dict(Date=expiration_date.isoformat())
+ elif expire_object_delete_marker is not None:
+ rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker)
+ if noncurrent_version_expiration_days or noncurrent_version_keep_newer:
+ rule['NoncurrentVersionExpiration'] = dict()
+ if noncurrent_version_expiration_days is not None:
+ rule['NoncurrentVersionExpiration']['NoncurrentDays'] = noncurrent_version_expiration_days
+ if noncurrent_version_keep_newer is not None:
+ rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer
+ if transition_days is not None:
+ rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ]
+
+ elif transition_date is not None:
+ rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ]
+
+ if transitions is not None:
+ if not rule.get('Transitions'):
+ rule['Transitions'] = []
+ for transition in transitions:
+ t_out = dict()
+ if transition.get("transition_date"):
+ t_out["Date"] = transition["transition_date"]
+ elif transition.get("transition_days") is not None:
+ t_out["Days"] = int(transition["transition_days"])
+ if transition.get("storage_class"):
+ t_out["StorageClass"] = transition["storage_class"].upper()
+ rule["Transitions"].append(t_out)
+
+ if noncurrent_version_transition_days is not None:
+ rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days,
+ StorageClass=noncurrent_version_storage_class.upper()), ]
+
+ if noncurrent_version_transitions is not None:
+ if not rule.get('NoncurrentVersionTransitions'):
+ rule['NoncurrentVersionTransitions'] = []
+ for noncurrent_version_transition in noncurrent_version_transitions:
+ t_out = dict()
+ t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days']
+ if noncurrent_version_transition.get('storage_class'):
+ t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper()
+ rule['NoncurrentVersionTransitions'].append(t_out)
+
+ return rule
+
+
+def compare_and_update_configuration(client, module, current_lifecycle_rules, rule):
+ purge_transitions = module.params.get("purge_transitions")
+ rule_id = module.params.get("rule_id")
+
+ lifecycle_configuration = dict(Rules=[])
+ changed = False
+ appended = False
+
+    # If current_lifecycle_rules is not empty then we have rules to compare, otherwise just add the rule
+ if current_lifecycle_rules:
+ # If rule ID exists, use that for comparison otherwise compare based on prefix
+ for existing_rule in current_lifecycle_rules:
+ if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''):
+ existing_rule.pop('ID')
+ elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''):
+ existing_rule.pop('ID')
+ if rule.get('ID') == existing_rule.get('ID'):
+ changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration)
+ changed = changed_ or changed
+ appended = appended_ or appended
+ else:
+ lifecycle_configuration['Rules'].append(existing_rule)
+
+ # If nothing appended then append now as the rule must not exist
+ if not appended:
+ lifecycle_configuration['Rules'].append(rule)
+ changed = True
+ else:
+ lifecycle_configuration['Rules'].append(rule)
+ changed = True
+
+ return changed, lifecycle_configuration
+
+
+def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj):
+ changed = False
+ if existing_rule['Status'] != new_rule['Status']:
+ if not new_rule.get('Transitions') and existing_rule.get('Transitions'):
+ new_rule['Transitions'] = existing_rule['Transitions']
+ if not new_rule.get('Expiration') and existing_rule.get('Expiration'):
+ new_rule['Expiration'] = existing_rule['Expiration']
+ if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'):
+ new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration']
+ lifecycle_obj['Rules'].append(new_rule)
+ changed = True
+ appended = True
+ else:
+ if not purge_transitions:
+ merge_transitions(new_rule, existing_rule)
+ if compare_rule(new_rule, existing_rule, purge_transitions):
+ lifecycle_obj['Rules'].append(new_rule)
+ appended = True
+ else:
+ lifecycle_obj['Rules'].append(new_rule)
+ changed = True
+ appended = True
+ return changed, appended
+
+
+def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None):
+ changed = False
+ lifecycle_configuration = dict(Rules=[])
+
+ # Check if rule exists
+ # If an ID exists, use that otherwise compare based on prefix
+ if rule_id is not None:
+ for existing_rule in current_lifecycle_rules:
+ if rule_id == existing_rule['ID']:
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_configuration['Rules'].append(existing_rule)
+ else:
+ for existing_rule in current_lifecycle_rules:
+ if prefix == existing_rule['Filter'].get('Prefix', ''):
+ # We're not keeping the rule (i.e. deleting) so mark as changed
+ changed = True
+ else:
+ lifecycle_configuration['Rules'].append(existing_rule)
+
+ return changed, lifecycle_configuration
+
+
+def compare_rule(new_rule, old_rule, purge_transitions):
+
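+    # Note: when purge_transitions is false, transitions are compared as a
+    # subset; every desired transition must already exist, while additional
+    # existing transitions for other storage classes are tolerated.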
+ # Copy objects
+ rule1 = deepcopy(new_rule)
+ rule2 = deepcopy(old_rule)
+
+ if purge_transitions:
+ return rule1 == rule2
+ else:
+ transitions1 = rule1.pop('Transitions', [])
+ transitions2 = rule2.pop('Transitions', [])
+        noncurrent_transitions1 = rule1.pop('NoncurrentVersionTransitions', [])
+        noncurrent_transitions2 = rule2.pop('NoncurrentVersionTransitions', [])
+ if rule1 != rule2:
+ return False
+ for transition in transitions1:
+ if transition not in transitions2:
+ return False
+        for transition in noncurrent_transitions1:
+            if transition not in noncurrent_transitions2:
+ return False
+ return True
+
+
+def merge_transitions(updated_rule, updating_rule):
+ # because of the legal S3 transitions, we know only one can exist for each storage class.
+ # So, our strategy is build some dicts, keyed on storage class and add the storage class transitions that are only
+ # in updating_rule to updated_rule
+ updated_transitions = {}
+ updating_transitions = {}
+ for transition in updated_rule.get('Transitions', []):
+ updated_transitions[transition['StorageClass']] = transition
+ for transition in updating_rule.get('Transitions', []):
+ updating_transitions[transition['StorageClass']] = transition
+ for storage_class, transition in updating_transitions.items():
+ if updated_transitions.get(storage_class) is None:
+ updated_rule['Transitions'].append(transition)
+
+
+def create_lifecycle_rule(client, module):
+
+ name = module.params.get("name")
+ wait = module.params.get("wait")
+ changed = False
+
+ old_lifecycle_rules = fetch_rules(client, module, name)
+ new_rule = build_rule(client, module)
+ (changed, lifecycle_configuration) = compare_and_update_configuration(client, module,
+ old_lifecycle_rules,
+ new_rule)
+ if changed:
+ # Write lifecycle to bucket
+ try:
+ client.put_bucket_lifecycle_configuration(
+ aws_retry=True,
+ Bucket=name,
+ LifecycleConfiguration=lifecycle_configuration,
+ )
+ except is_boto3_error_message("At least one action needs to be specified in a rule"):
+ # Amazon interpreted this as not changing anything
+ changed = False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules)
+
+ _changed = changed
+ _retries = 10
+ _not_changed_cnt = 6
+ while wait and _changed and _retries and _not_changed_cnt:
+ # We've seen examples where get_bucket_lifecycle_configuration returns
+        # the updated rules, then the old rules, then the updated rules again,
+        # flapping back and forth a couple of times.
+        # Thus read the rules a few times in a row to check that they have settled.
+ time.sleep(5)
+ _retries -= 1
+ new_rules = fetch_rules(client, module, name)
+ (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module,
+ new_rules,
+ new_rule)
+ if not _changed:
+ _not_changed_cnt -= 1
+ _changed = True
+ else:
+ _not_changed_cnt = 6
+ else:
+ _retries = 0
+
+ new_rules = fetch_rules(client, module, name)
+
+ module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules,
+ old_rules=old_lifecycle_rules, _retries=_retries,
+ _config=lifecycle_configuration)
+
+
+def destroy_lifecycle_rule(client, module):
+
+ name = module.params.get("name")
+ prefix = module.params.get("prefix")
+ rule_id = module.params.get("rule_id")
+ wait = module.params.get("wait")
+ changed = False
+
+ if prefix is None:
+ prefix = ""
+
+ current_lifecycle_rules = fetch_rules(client, module, name)
+ changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix)
+
+ if changed:
+ # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
+ try:
+ if lifecycle_obj['Rules']:
+ client.put_bucket_lifecycle_configuration(
+ aws_retry=True,
+ Bucket=name,
+ LifecycleConfiguration=lifecycle_obj)
+ elif current_lifecycle_rules:
+ changed = True
+ client.delete_bucket_lifecycle(aws_retry=True, Bucket=name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ _changed = changed
+ _retries = 10
+ _not_changed_cnt = 6
+ while wait and _changed and _retries and _not_changed_cnt:
+ # We've seen examples where get_bucket_lifecycle_configuration returns
+        # the updated rules, then the old rules, then the updated rules again,
+        # flapping back and forth a couple of times.
+        # Thus read the rules a few times in a row to check that they have settled.
+ time.sleep(5)
+ _retries -= 1
+ new_rules = fetch_rules(client, module, name)
+ (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix)
+ if not _changed:
+ _not_changed_cnt -= 1
+ _changed = True
+ else:
+ _not_changed_cnt = 6
+ else:
+ _retries = 0
+
+ new_rules = fetch_rules(client, module, name)
+
+ module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules,
+ _retries=_retries)
+
+
+def main():
+ s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ abort_incomplete_multipart_upload_days=dict(type='int'),
+ expiration_days=dict(type='int'),
+ expiration_date=dict(),
+ expire_object_delete_marker=dict(type='bool'),
+ noncurrent_version_expiration_days=dict(type='int'),
+ noncurrent_version_keep_newer=dict(type='int'),
+ noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
+ noncurrent_version_transition_days=dict(type='int'),
+ noncurrent_version_transitions=dict(type='list', elements='dict'),
+ prefix=dict(),
+ rule_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
+ transition_days=dict(type='int'),
+ transition_date=dict(),
+ transitions=dict(type='list', elements='dict'),
+ purge_transitions=dict(default=True, type='bool'),
+ wait=dict(type='bool', default=False)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ["expiration_days", "expiration_date", "expire_object_delete_marker"],
+ ["expiration_days", "transition_date"],
+ ["transition_days", "transition_date"],
+ ["transition_days", "expiration_date"],
+ ["transition_days", "transitions"],
+ ["transition_date", "transitions"],
+ ["noncurrent_version_transition_days", "noncurrent_version_transitions"],
+ ],
+ required_by={
+ "noncurrent_version_keep_newer": ["noncurrent_version_expiration_days"],
+ },
+ )
+
+ client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())
+
+ expiration_date = module.params.get("expiration_date")
+ transition_date = module.params.get("transition_date")
+ state = module.params.get("state")
+
+ if module.params.get("noncurrent_version_keep_newer"):
+ module.require_botocore_at_least(
+ "1.23.12",
+ reason="to set number of versions to keep with noncurrent_version_keep_newer"
+ )
+
+ if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
+
+ required_when_present = ('abort_incomplete_multipart_upload_days',
+ 'expiration_date', 'expiration_days', 'expire_object_delete_marker',
+ 'transition_date', 'transition_days', 'transitions',
+ 'noncurrent_version_expiration_days',
+ 'noncurrent_version_keep_newer',
+ 'noncurrent_version_transition_days',
+ 'noncurrent_version_transitions')
+ for param in required_when_present:
+ if module.params.get(param) is None:
+ break
+ else:
+ msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
+ module.fail_json(msg=msg)
+
+ # If dates have been set, make sure they're in a valid format
+ if expiration_date:
+ expiration_date = parse_date(expiration_date)
+ if expiration_date is None:
+ module.fail_json(msg="expiration_date is not a valid ISO-8601 format."
+ " The time must be midnight and a timezone of GMT must be included")
+ if transition_date:
+ transition_date = parse_date(transition_date)
+ if transition_date is None:
+ module.fail_json(msg="transition_date is not a valid ISO-8601 format."
+ " The time must be midnight and a timezone of GMT must be included")
+
+ if state == 'present':
+ create_lifecycle_rule(client, module)
+ elif state == 'absent':
+ destroy_lifecycle_rule(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_logging.py b/ansible_collections/community/aws/plugins/modules/s3_logging.py
new file mode 100644
index 000000000..011baa951
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_logging.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_logging
+version_added: 1.0.0
+short_description: Manage logging facility of an s3 bucket in AWS
+description:
+  - Manage logging facility of an S3 bucket in AWS.
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket."
+ required: true
+ type: str
+ state:
+ description:
+ - "Enable or disable logging."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ target_bucket:
+ description:
+ - "The bucket to log to. Required when state=present."
+ type: str
+ target_prefix:
+ description:
+ - "The prefix that should be prepended to the generated log files written to the target_bucket."
+ default: ""
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
+ community.aws.s3_logging:
+ name: mywebsite.com
+ target_bucket: mylogs
+ target_prefix: logs/mywebsite.com
+ state: present
+
+- name: Remove logging on an s3 bucket
+ community.aws.s3_logging:
+ name: mywebsite.com
+ state: absent
+
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
+
+ if not bucket_logging.get('LoggingEnabled', False):
+ if target_bucket:
+ return True
+ return False
+
+ logging = bucket_logging['LoggingEnabled']
+ if logging['TargetBucket'] != target_bucket:
+ return True
+ if logging['TargetPrefix'] != target_prefix:
+ return True
+ return False
+
+
+def verify_acls(connection, module, target_bucket):
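+    # Server access logging requires the target bucket's ACL to grant the S3
+    # LogDelivery group access; add the grant when it is missing.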
+ try:
+ current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket)
+ current_grants = current_acl['Grants']
+ except is_boto3_error_code('NoSuchBucket'):
+ module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to fetch target bucket ACL")
+
+ required_grant = {
+ 'Grantee': {
+ 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery",
+ 'Type': 'Group'
+ },
+ 'Permission': 'FULL_CONTROL'
+ }
+
+ for grant in current_grants:
+ if grant == required_grant:
+ return False
+
+ if module.check_mode:
+ return True
+
+ updated_acl = dict(current_acl)
+ updated_grants = list(current_grants)
+ updated_grants.append(required_grant)
+ updated_acl['Grants'] = updated_grants
+ del updated_acl['ResponseMetadata']
+ try:
+ connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update target bucket ACL to allow log delivery")
+
+ return True
+
+
+def enable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ target_bucket = module.params.get("target_bucket")
+ target_prefix = module.params.get("target_prefix")
+ changed = False
+
+ try:
+ bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchBucket'):
+ module.fail_json(msg="Bucket '{0}' not found".format(bucket_name))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to fetch current logging status")
+
+ try:
+ changed |= verify_acls(connection, module, target_bucket)
+
+ if not compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
+ bucket_logging = camel_dict_to_snake_dict(bucket_logging)
+ module.exit_json(changed=changed, **bucket_logging)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ result = connection.put_bucket_logging(
+ aws_retry=True,
+ Bucket=bucket_name,
+ BucketLoggingStatus={
+ 'LoggingEnabled': {
+ 'TargetBucket': target_bucket,
+ 'TargetPrefix': target_prefix,
+ }
+ })
+
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to enable bucket logging")
+
+ result = camel_dict_to_snake_dict(result)
+ module.exit_json(changed=True, **result)
+
+
+def disable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ changed = False
+
+ try:
+ bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to fetch current logging status")
+
+ if not compare_bucket_logging(bucket_logging, None, None):
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ response = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['InvalidTargetBucketForLogging']
+ )(connection.put_bucket_logging)(
+ Bucket=bucket_name, BucketLoggingStatus={}
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to disable bucket logging")
+
+ module.exit_json(changed=True)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ target_bucket=dict(required=False, default=None),
+ target_prefix=dict(required=False, default=""),
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_bucket_logging(connection, module)
+ elif state == 'absent':
+ disable_bucket_logging(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
new file mode 100644
index 000000000..dff566821
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_metrics_configuration
+version_added: 1.3.0
+short_description: Manage s3 bucket metrics configuration in AWS
+description:
+  - Manage S3 bucket metrics configuration in AWS, which allows you to get CloudWatch request metrics for the objects in a bucket.
+author: Dmytro Vorotyntsev (@vorotech)
+notes:
+  - This module manages a single metrics configuration; an S3 bucket may have up to 1,000 metrics configurations.
+  - To request metrics for the entire bucket, create a metrics configuration without a filter.
+  - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on.
+options:
+ bucket_name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ type: str
+ id:
+ description:
+ - "The ID used to identify the metrics configuration"
+ required: true
+ type: str
+ filter_prefix:
+ description:
+ - "A prefix used when evaluating a metrics filter"
+ required: false
+ type: str
+ filter_tags:
+ description:
+ - "A dictionary of one or more tags used when evaluating a metrics filter"
+ required: false
+ aliases: ['filter_tag']
+ type: dict
+ default: {}
+ state:
+ description:
+ - "Create or delete metrics configuration"
+ default: present
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a metrics configuration that enables metrics for an entire bucket
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: EntireBucket
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects starting with a prefix
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: Assets
+ filter_prefix: assets
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects with specific tag
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: Assets
+ filter_tag:
+ kind: asset
+ state: present
+
+- name: Put a metrics configuration that enables metrics for objects that start with a particular prefix and have specific tags applied
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: ImportantBlueDocuments
+ filter_prefix: documents
+ filter_tags:
+ priority: high
+ class: blue
+ state: present
+
+- name: Delete metrics configuration
+ community.aws.s3_metrics_configuration:
+ bucket_name: my-bucket
+ id: EntireBucket
+ state: absent
+
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+
+
+def _create_metrics_configuration(mc_id, filter_prefix, filter_tags):
+ payload = {
+ 'Id': mc_id
+ }
+ # Just a filter_prefix or just a single tag filter is a special case
+ if filter_prefix and not filter_tags:
+ payload['Filter'] = {
+ 'Prefix': filter_prefix
+ }
+ elif not filter_prefix and len(filter_tags) == 1:
+ payload['Filter'] = {
+ 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0]
+ }
+ # Otherwise we need to use 'And'
+ elif filter_tags:
+ payload['Filter'] = {
+ 'And': {
+ 'Tags': ansible_dict_to_boto3_tag_list(filter_tags)
+ }
+ }
+ if filter_prefix:
+ payload['Filter']['And']['Prefix'] = filter_prefix
+
+ return payload
+
+
+def create_or_update_metrics_configuration(client, module):
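+    # Fetch the existing configuration (if any), build the desired payload,
+    # and only call PutBucketMetricsConfiguration when the two differ.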
+ bucket_name = module.params.get('bucket_name')
+ mc_id = module.params.get('id')
+ filter_prefix = module.params.get('filter_prefix')
+ filter_tags = module.params.get('filter_tags')
+
+ try:
+ response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ metrics_configuration = response['MetricsConfiguration']
+ except is_boto3_error_code('NoSuchConfiguration'):
+ metrics_configuration = None
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
+
+ new_configuration = _create_metrics_configuration(mc_id, filter_prefix, filter_tags)
+
+ if metrics_configuration:
+ if metrics_configuration == new_configuration:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ client.put_bucket_metrics_configuration(
+ aws_retry=True,
+ Bucket=bucket_name,
+ Id=mc_id,
+ MetricsConfiguration=new_configuration
+ )
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id)
+
+ module.exit_json(changed=True)
+
+
+def delete_metrics_configuration(client, module):
+ bucket_name = module.params.get('bucket_name')
+ mc_id = module.params.get('id')
+
+ try:
+ client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ except is_boto3_error_code('NoSuchConfiguration'):
+ module.exit_json(changed=False)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
+ except is_boto3_error_code('NoSuchConfiguration'):
+ module.exit_json(changed=False)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = dict(
+ bucket_name=dict(type='str', required=True),
+ id=dict(type='str', required=True),
+ filter_prefix=dict(type='str', required=False),
+ filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+
+ try:
+ client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if state == 'present':
+ create_or_update_metrics_configuration(client, module)
+ elif state == 'absent':
+ delete_metrics_configuration(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_sync.py b/ansible_collections/community/aws/plugins/modules/s3_sync.py
new file mode 100644
index 000000000..80e3db0bd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_sync.py
@@ -0,0 +1,538 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_sync
+version_added: 1.0.0
+short_description: Efficiently upload multiple files to S3
+description:
+  - The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, this module handles globbing,
+    inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
+options:
+ mode:
+ description:
+ - sync direction.
+ default: 'push'
+ choices: [ 'push' ]
+ type: str
+ file_change_strategy:
+ description:
+      - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched; they are fully skipped or fully uploaded.
+      - C(date_size) will upload if file sizes don't match or if the local file's modified date is newer than the S3 version's.
+      - C(checksum) will compare ETag values based on S3's implementation of chunked MD5s.
+      - C(force) will always upload all files.
+ required: false
+ default: 'date_size'
+ choices: [ 'force', 'checksum', 'date_size' ]
+ type: str
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ key_prefix:
+ description:
+      - In addition to the file path, prepend the S3 path with this prefix. The module will add a slash at the end of the prefix if necessary.
+ required: false
+ type: str
+ default: ''
+ file_root:
+ description:
+ - File/directory path for synchronization. This is a local path.
+ - This root path is scrubbed from the key name, so subdirectories will remain as keys.
+ required: true
+ type: path
+ permission:
+ description:
+ - Canned ACL to apply to synced files.
+      - Changing this ACL only affects newly synced files; it does not trigger a full reupload.
+ required: false
+ choices:
+ - 'private'
+ - 'public-read'
+ - 'public-read-write'
+ - 'authenticated-read'
+ - 'aws-exec-read'
+ - 'bucket-owner-read'
+ - 'bucket-owner-full-control'
+ type: str
+ mime_map:
+ description:
+ - >
+ Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
+ For example C({".txt": "application/text", ".yml": "application/text"})
+ required: false
+ type: dict
+ include:
+ description:
+ - Shell pattern-style file matching.
+ - Used before exclude to determine eligible files (for instance, only C("*.gif"))
+ - For multiple patterns, comma-separate them.
+ required: false
+ default: "*"
+ type: str
+ exclude:
+ description:
+ - Shell pattern-style file matching.
+ - Used after include to remove files (for instance, skip C("*.txt"))
+ - For multiple patterns, comma-separate them.
+ required: false
+ default: ".*"
+ type: str
+ cache_control:
+ description:
+ - Cache-Control header set on uploaded objects.
+ - Directives are separated by commas.
+ required: false
+ type: str
+ default: ''
+ storage_class:
+ description:
+ - Storage class to be associated to each object added to the S3 bucket.
+ required: false
+ choices:
+ - 'STANDARD'
+ - 'REDUCED_REDUNDANCY'
+ - 'STANDARD_IA'
+ - 'ONEZONE_IA'
+ - 'INTELLIGENT_TIERING'
+ - 'GLACIER'
+ - 'DEEP_ARCHIVE'
+ - 'OUTPOSTS'
+ default: 'STANDARD'
+ type: str
+ version_added: 1.5.0
+ delete:
+ description:
+ - Remove remote files that exist in bucket but are not present in the file root.
+ required: false
+ default: false
+ type: bool
+
+author: Ted Timmons (@tedder)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: basic upload
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files/
+
+- name: basic upload using the glacier storage class
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files/
+ storage_class: GLACIER
+
+- name: basic individual file upload
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files/file_name
+
+- name: all the options
+ community.aws.s3_sync:
+ bucket: tedder
+ file_root: roles/s3/files
+ mime_map:
+ .yml: application/text
+ .json: application/text
+ key_prefix: config_files/web
+ file_change_strategy: force
+ permission: public-read
+ cache_control: "public, max-age=31536000"
+ storage_class: "GLACIER"
+ include: "*"
+ exclude: "*.txt,.*"
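+
+# A minimal sketch using the documented delete option: remote objects under
+# the key prefix that have no local counterpart are removed after the upload
+# pass. Values mirror the earlier examples.
+- name: upload and delete remote files not present locally
+  community.aws.s3_sync:
+    bucket: tedder
+    file_root: roles/s3/files
+    delete: true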
+'''
+
+RETURN = '''
+filelist_initial:
+ description: file listing (dicts) from initial globbing
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "modified_epoch": 1477416706
+ }]
+filelist_local_etag:
+ description: file listing (dicts) including calculated local etag
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706,
+ "s3_path": "s3sync/policy.json"
+ }]
+filelist_s3:
+ description: file listing (dicts) including information about previously-uploaded versions
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706,
+ "s3_path": "s3sync/policy.json"
+ }]
+filelist_typed:
+ description: file listing (dicts) with calculated or overridden mime types
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477416706
+ }]
+filelist_actionable:
+ description: file listing (dicts) of files that will be uploaded after the strategy decision
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "mime_type": "application/json",
+ "modified_epoch": 1477931256,
+ "s3_path": "s3sync/policy.json",
+ "whysize": "151 / 151",
+ "whytime": "1477931256 / 1477929260"
+ }]
+uploads:
+ description: file listing (dicts) of files that were actually uploaded
+ returned: always
+ type: list
+ sample: [{
+ "bytes": 151,
+ "chopped_path": "policy.json",
+ "fullpath": "roles/cf/files/policy.json",
+ "s3_path": "s3sync/policy.json",
+ "whysize": "151 / 151",
+ "whytime": "1477931637 / 1477931489"
+ }]
+
+'''
+
+import datetime
+import fnmatch
+import mimetypes
+import os
+import stat as osstat # os.stat constants
+
+try:
+ from dateutil import tz
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag
+
+
+def gather_files(fileroot, include=None, exclude=None):
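+    # Collect fullpath/relative-path/mtime/size metadata for each candidate
+    # file, either for a single file or by walking the directory tree; the
+    # comma-separated include/exclude shell patterns are applied to bare
+    # filenames during the walk.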
+ ret = []
+
+ if os.path.isfile(fileroot):
+ fullpath = fileroot
+ fstat = os.stat(fullpath)
+ path_array = fileroot.split('/')
+ chopped_path = path_array[-1]
+ f_size = fstat[osstat.ST_SIZE]
+ f_modified_epoch = fstat[osstat.ST_MTIME]
+ ret.append({
+ 'fullpath': fullpath,
+ 'chopped_path': chopped_path,
+ 'modified_epoch': f_modified_epoch,
+ 'bytes': f_size,
+ })
+
+ else:
+ for (dirpath, dirnames, filenames) in os.walk(fileroot):
+ for fn in filenames:
+ fullpath = os.path.join(dirpath, fn)
+ # include/exclude
+ if include:
+ found = False
+ for x in include.split(','):
+ if fnmatch.fnmatch(fn, x):
+ found = True
+ if not found:
+ # not on the include list, so we don't want it.
+ continue
+
+ if exclude:
+ found = False
+ for x in exclude.split(','):
+ if fnmatch.fnmatch(fn, x):
+ found = True
+ if found:
+ # skip it, even if previously included.
+ continue
+
+ chopped_path = os.path.relpath(fullpath, start=fileroot)
+ fstat = os.stat(fullpath)
+ f_size = fstat[osstat.ST_SIZE]
+ f_modified_epoch = fstat[osstat.ST_MTIME]
+ ret.append({
+ 'fullpath': fullpath,
+ 'chopped_path': chopped_path,
+ 'modified_epoch': f_modified_epoch,
+ 'bytes': f_size,
+ })
+ # dirpath = path *to* the directory
+ # dirnames = subdirs *in* our directory
+ # filenames
+ return ret
+
+
+def calculate_s3_path(filelist, key_prefix=''):
+ ret = []
+ for fileentry in filelist:
+ # don't modify the input dict
+ retentry = fileentry.copy()
+ retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path'])
+ ret.append(retentry)
+ return ret
+
+
+def calculate_local_etag(filelist, key_prefix=''):
+ '''Really, "calculate md5", but since AWS uses their own format, we'll just call
+ it a "local etag". TODO optimization: only calculate if remote key exists.'''
+ ret = []
+ for fileentry in filelist:
+ # don't modify the input dict
+ retentry = fileentry.copy()
+ retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath'])
+ ret.append(retentry)
+ return ret
+
+
+def determine_mimetypes(filelist, override_map):
+ ret = []
+ for fileentry in filelist:
+ retentry = fileentry.copy()
+ localfile = fileentry['fullpath']
+
+ # reminder: file extension is '.txt', not 'txt'.
+ file_extension = os.path.splitext(localfile)[1]
+ if override_map and override_map.get(file_extension):
+ # override? use it.
+ retentry['mime_type'] = override_map[file_extension]
+ else:
+ # else sniff it
+ retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
+
+ # might be None or '' from one of the above. Not a great type but better than nothing.
+ if not retentry['mime_type']:
+ retentry['mime_type'] = 'application/octet-stream'
+
+ ret.append(retentry)
+
+ return ret
+
+
+def head_s3(s3, bucket, s3keys):
+ retkeys = []
+ for entry in s3keys:
+ retentry = entry.copy()
+ try:
+ retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
+ # 404 (Missing) - File doesn't exist, we'll need to upload
+ # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload
+ except is_boto3_error_code(['404', '403']):
+ pass
+ retkeys.append(retentry)
+ return retkeys
+
+
+def filter_list(s3, bucket, s3filelist, strategy):
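+    # Decide which files actually need uploading. 'force' keeps everything;
+    # 'checksum' skips files whose remote ETag matches the locally computed
+    # etag; 'date_size' skips files whose sizes match and whose local mtime
+    # is not newer than the remote LastModified timestamp.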
+ keeplist = list(s3filelist)
+
+ for e in keeplist:
+ e['_strategy'] = strategy
+
+ # init/fetch info from S3 if we're going to use it for comparisons
+ if not strategy == 'force':
+ keeplist = head_s3(s3, bucket, s3filelist)
+
+ # now actually run the strategies
+ if strategy == 'checksum':
+ for entry in keeplist:
+ if entry.get('s3_head'):
+ # since we have a remote s3 object, compare the values.
+ if entry['s3_head']['ETag'] == entry['local_etag']:
+ # files match, so remove the entry
+ entry['skip_flag'] = True
+ else:
+ # file etags don't match, keep the entry.
+ pass
+ else: # we don't have an etag, so we'll keep it.
+ pass
+ elif strategy == 'date_size':
+ for entry in keeplist:
+ if entry.get('s3_head'):
+ # fstat = entry['stat']
+ local_modified_epoch = entry['modified_epoch']
+ local_size = entry['bytes']
+
+ # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
+ # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
+ remote_modified_datetime = entry['s3_head']['LastModified']
+ delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
+ remote_modified_epoch = delta.seconds + (delta.days * 86400)
+
+ remote_size = entry['s3_head']['ContentLength']
+
+ entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch)
+ entry['whysize'] = '{0} / {1}'.format(local_size, remote_size)
+
+ if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
+ entry['skip_flag'] = True
+ else:
+ entry['why'] = "no s3_head"
+    # else: probably 'force'. Basically we don't skip anything with other strategies.
+ else:
+ pass
+
+ # prune 'please skip' entries, if any.
+ return [x for x in keeplist if not x.get('skip_flag')]
+
+
+def upload_files(s3, bucket, filelist, params):
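+    # Upload each actionable file, passing ACL, Cache-Control and storage
+    # class through as ExtraArgs when the corresponding parameters are set.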
+ ret = []
+ for entry in filelist:
+ args = {
+ 'ContentType': entry['mime_type']
+ }
+ if params.get('permission'):
+ args['ACL'] = params['permission']
+ if params.get('cache_control'):
+ args['CacheControl'] = params['cache_control']
+ if params.get('storage_class'):
+ args['StorageClass'] = params['storage_class']
+ # if this fails exception is caught in main()
+ s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
+ ret.append(entry)
+ return ret
+
+
+def remove_files(s3, sourcelist, params):
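+    # Delete remote keys under key_prefix that are absent from the local
+    # source list, batching deletions to stay within the API limit.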
+ bucket = params.get('bucket')
+ key_prefix = params.get('key_prefix')
+ paginator = s3.get_paginator('list_objects_v2')
+ current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', []))
+ keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist)
+ delete_keys = list(current_keys - keep_keys)
+
+ # can delete 1000 objects at a time
+ groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
+ for keys in groups_of_keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]})
+
+ return delete_keys
+
+
+def main():
+ argument_spec = dict(
+ mode=dict(choices=['push'], default='push'),
+ file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
+ bucket=dict(required=True),
+ key_prefix=dict(required=False, default='', no_log=False),
+ file_root=dict(required=True, type='path'),
+ permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
+ 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
+ mime_map=dict(required=False, type='dict'),
+ exclude=dict(required=False, default=".*"),
+ include=dict(required=False, default="*"),
+ cache_control=dict(required=False, default=''),
+ delete=dict(required=False, type='bool', default=False),
+ storage_class=dict(required=False, default='STANDARD',
+ choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA',
+ 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']),
+ # future options: encoding, metadata, retries
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_DATEUTIL:
+ module.fail_json(msg='dateutil required for this module')
+
+ result = {}
+ mode = module.params['mode']
+
+ try:
+ s3 = module.client('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ if mode == 'push':
+ try:
+ result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
+ result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
+ result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
+ try:
+ result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
+ except ValueError as e:
+ if module.params['file_change_strategy'] == 'checksum':
+ module.fail_json_aws(e, 'Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy')
+ result['filelist_local_etag'] = result['filelist_s3'].copy()
+ result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
+ result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
+
+ if module.params['delete']:
+ result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params)
+
+ # mark changed if we actually upload something.
+ if result.get('uploads') or result.get('removed'):
+ result['changed'] = True
+ # result.update(filelist=actionable_filelist)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to push file")
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/s3_website.py b/ansible_collections/community/aws/plugins/modules/s3_website.py
new file mode 100644
index 000000000..81d3169cd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/s3_website.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_website
+version_added: 1.0.0
+short_description: Configure an s3 bucket as a website
+description:
+ - Configure an s3 bucket as a website
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ type: str
+ error_key:
+ description:
+ - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
+ type: str
+ redirect_all_requests:
+ description:
+ - "Describes the redirect behavior for every request to this s3 bucket website endpoint"
+ type: str
+ state:
+ description:
+ - "Add or remove s3 website configuration"
+ choices: [ 'present', 'absent' ]
+ required: true
+ type: str
+ suffix:
+ description:
+ - >
+ Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
+ samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
+ character.
+ default: index.html
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Configure an s3 bucket to redirect all requests to example.com
+ community.aws.s3_website:
+ name: mybucket.com
+ redirect_all_requests: example.com
+ state: present
+
+- name: Remove website configuration from an s3 bucket
+ community.aws.s3_website:
+ name: mybucket.com
+ state: absent
+
+- name: Configure an s3 bucket as a website with index and error pages
+ community.aws.s3_website:
+ name: mybucket.com
+ suffix: home.htm
+ error_key: errors/404.htm
+ state: present
+
+'''
+
+RETURN = '''
+index_document:
+ description: index document
+ type: complex
+ returned: always
+ contains:
+ suffix:
+ description: suffix that is appended to a request that is for a directory on the website endpoint
+ returned: success
+ type: str
+ sample: index.html
+error_document:
+ description: error document
+ type: complex
+ returned: always
+ contains:
+ key:
+ description: object key name to use when a 4XX class error occurs
+ returned: when error_document parameter set
+ type: str
+ sample: error.html
+redirect_all_requests_to:
+ description: where to redirect requests
+ type: complex
+ returned: always
+ contains:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when redirect all requests parameter set
+ type: str
+ sample: ansible.com
+ protocol:
+ description: protocol to use when redirecting requests.
+ returned: when redirect all requests parameter set
+ type: str
+ sample: https
+routing_rules:
+ description: routing rules
+ type: list
+ returned: always
+ contains:
+ condition:
+ type: complex
+ description: A container for describing a condition that must be met for the specified redirect to apply.
+ contains:
+ http_error_code_returned_equals:
+ description: The HTTP error code when the redirect is applied.
+ returned: always
+ type: str
+ key_prefix_equals:
+ description: object key name prefix when the redirect is applied. For example, to redirect
+ requests for ExamplePage.html, the key prefix will be ExamplePage.html
+ returned: when routing rule present
+ type: str
+ sample: docs/
+ redirect:
+ type: complex
+ description: Container for redirect information.
+ returned: always
+ contains:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when host name set as part of redirect rule
+ type: str
+ sample: ansible.com
+ http_redirect_code:
+ description: The HTTP redirect code to use on the response.
+ returned: when routing rule present
+ type: str
+ protocol:
+ description: Protocol to use when redirecting requests.
+ returned: when routing rule present
+ type: str
+ sample: http
+ replace_key_prefix_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: str
+ sample: documents/
+ replace_key_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: str
+ sample: documents/
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def _create_redirect_dict(url):
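+    # Accept either 'hostname' or 'protocol://hostname' and convert it into
+    # the RedirectAllRequestsTo structure expected by the S3 website API.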
+
+ redirect_dict = {}
+ url_split = url.split(':')
+
+ # Did we split anything?
+ if len(url_split) == 2:
+ redirect_dict[u'Protocol'] = url_split[0]
+ redirect_dict[u'HostName'] = url_split[1].replace('//', '')
+ elif len(url_split) == 1:
+ redirect_dict[u'HostName'] = url_split[0]
+ else:
+ raise ValueError('Redirect URL appears invalid')
+
+ return redirect_dict
+
+
+def _create_website_configuration(suffix, error_key, redirect_all_requests):
+
+ website_configuration = {}
+
+ if error_key is not None:
+ website_configuration['ErrorDocument'] = {'Key': error_key}
+
+ if suffix is not None:
+ website_configuration['IndexDocument'] = {'Suffix': suffix}
+
+ if redirect_all_requests is not None:
+ website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
+
+ return website_configuration
+
+
+def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
+
+ bucket_name = module.params.get("name")
+ redirect_all_requests = module.params.get("redirect_all_requests")
+ # If redirect_all_requests is set then don't use the default suffix that has been set
+ if redirect_all_requests is not None:
+ suffix = None
+ else:
+ suffix = module.params.get("suffix")
+ error_key = module.params.get("error_key")
+ changed = False
+
+ try:
+ bucket_website = resource_connection.BucketWebsite(bucket_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket")
+
+ try:
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ website_config = None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get website configuration")
+
+ if website_config is None:
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to set bucket website configuration")
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+ else:
+ try:
+ if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
+ (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
+ (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
+
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket website configuration")
+ except KeyError as e:
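+            # A KeyError means the existing configuration lacks one of the
+            # compared fields, so rewrite the configuration wholesale.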
+ try:
+ bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket website configuration")
+ except ValueError as e:
+ module.fail_json(msg=str(e))
+
+ # Wait 5 secs before getting the website_config again to give it time to update
+ time.sleep(5)
+
+ website_config = client_connection.get_bucket_website(Bucket=bucket_name)
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
+
+
+def disable_bucket_as_website(client_connection, module):
+
+ changed = False
+ bucket_name = module.params.get("name")
+
+ try:
+ client_connection.get_bucket_website(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ module.exit_json(changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket website")
+
+ try:
+ client_connection.delete_bucket_website(Bucket=bucket_name)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket website")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ suffix=dict(type='str', required=False, default='index.html'),
+ error_key=dict(type='str', required=False, no_log=False),
+ redirect_all_requests=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['redirect_all_requests', 'suffix'],
+ ['redirect_all_requests', 'error_key']
+ ],
+ )
+
+ try:
+ client_connection = module.client('s3')
+ resource_connection = module.resource('s3')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_or_update_bucket_as_website(client_connection, resource_connection, module)
+ elif state == 'absent':
+ disable_bucket_as_website(client_connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py
new file mode 100644
index 000000000..851746189
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, REY Remi
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: secretsmanager_secret
+version_added: 1.0.0
+short_description: Manage secrets stored in AWS Secrets Manager
+description:
+ - Create, update, and delete secrets stored in AWS Secrets Manager.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_secret).
+ The usage did not change.
+author:
+ - "REY Remi (@rrey)"
+options:
+ name:
+ description:
+ - Friendly name for the secret you are creating.
+ required: true
+ type: str
+ state:
+ description:
+      - Whether the secret should exist or not.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ overwrite:
+ description:
+ - Whether to overwrite an existing secret with the same name.
+ - If set to C(True), an existing secret with the same I(name) will be overwritten.
+ - If set to C(False), a secret with the given I(name) will only be created if none exists.
+ type: bool
+ default: True
+ version_added: 5.3.0
+ recovery_window:
+ description:
+ - Only used if state is absent.
+ - Specifies the number of days that Secrets Manager waits before it can delete the secret.
+ - If set to 0, the deletion is forced without recovery.
+ default: 30
+ type: int
+ description:
+ description:
+ - Specifies a user-provided description of the secret.
+ type: str
+ default: ''
+ replica:
+ description:
+      - Specifies a list of regions and, optionally, kms_key_ids to replicate the secret to.
+ type: list
+ elements: dict
+ version_added: 5.3.0
+ suboptions:
+ region:
+ description:
+ - Region to replicate secret to.
+ type: str
+ required: true
+ kms_key_id:
+ description:
+ - Specifies the ARN or alias of the AWS KMS customer master key (CMK) in the
+ destination region to be used (alias/aws/secretsmanager is assumed if not specified)
+ type: str
+ required: false
+ kms_key_id:
+ description:
+ - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
+ used to encrypt the I(secret) values in the versions stored in this secret.
+ type: str
+ secret_type:
+ description:
+ - Specifies the type of data that you want to encrypt.
+ choices: ['binary', 'string']
+ default: 'string'
+ type: str
+ secret:
+ description:
+ - Specifies string or binary data that you want to encrypt and store in the new version of the secret.
+ - Mutually exclusive with the I(json_secret) option.
+ default: ""
+ type: str
+ json_secret:
+ description:
+ - Specifies JSON-formatted data that you want to encrypt and store in the new version of the
+ secret.
+ - Mutually exclusive with the I(secret) option.
+ type: json
+ version_added: 4.1.0
+ resource_policy:
+ description:
+ - Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access
+ to secrets.
+ required: false
+ type: json
+ version_added: 3.1.0
+ rotation_lambda:
+ description:
+ - Specifies the ARN of the Lambda function that can rotate the secret.
+ type: str
+ rotation_interval:
+ description:
+ - Specifies the number of days between automatic scheduled rotations of the secret.
+ default: 30
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.ec2
+ - amazon.aws.aws
+ - amazon.aws.boto3
+ - amazon.aws.tags
+notes:
+ - Support for I(purge_tags) was added in release 4.0.0.
+'''
+
+EXAMPLES = r'''
+- name: Add string to AWS Secrets Manager
+ community.aws.secretsmanager_secret:
+ name: 'test_secret_string'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+
+- name: Add a secret with resource policy attached
+ community.aws.secretsmanager_secret:
+ name: 'test_secret_string'
+ state: present
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+ resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}"
+
+- name: remove string from AWS Secrets Manager
+ community.aws.secretsmanager_secret:
+ name: 'test_secret_string'
+ state: absent
+ secret_type: 'string'
+ secret: "{{ super_secret_string }}"
+
+- name: Only create a new secret, but do not update it if one already exists with the same name
+ community.aws.secretsmanager_secret:
+ name: 'random_string'
+ state: present
+ secret_type: 'string'
+ secret: "{{ lookup('community.general.random_string', length=16, special=false) }}"
+ overwrite: false
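+
+# A minimal sketch using the replica option documented above; the region and
+# KMS key alias values are illustrative.
+- name: Add a secret replicated to a second region
+  community.aws.secretsmanager_secret:
+    name: 'test_secret_string'
+    state: present
+    secret_type: 'string'
+    secret: "{{ super_secret_string }}"
+    replica:
+      - region: 'us-east-2'
+        kms_key_id: 'alias/aws/secretsmanager'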
+'''
+
+RETURN = r'''
+secret:
+ description: The secret information
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The ARN of the secret.
+ returned: always
+ type: str
+ sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
+ description:
+ description: A description of the secret.
+ returned: when the secret has a description
+ type: str
+ sample: An example description
+ last_accessed_date:
+ description: The date the secret was last accessed.
+ returned: always
+ type: str
+ sample: '2018-11-20T01:00:00+01:00'
+ last_changed_date:
+ description: The date the secret was last modified.
+ returned: always
+ type: str
+ sample: '2018-11-20T12:16:38.433000+01:00'
+ name:
+ description: The secret name.
+ returned: always
+ type: str
+ sample: my_secret
+ rotation_enabled:
+ description: The secret rotation status.
+ returned: always
+ type: bool
+ sample: false
+ version_ids_to_stages:
+      description: Provides the secret version IDs and the associated secret stages.
+ returned: always
+ type: dict
+ sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
+ tags:
+ description:
+ - A list of dictionaries representing the tags associated with the secret in the standard boto3 format.
+ returned: when the secret has tags
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The name or key of the tag.
+ type: str
+ example: MyTag
+ returned: success
+ value:
+ description: The value of the tag.
+ type: str
+ example: Some value.
+ returned: success
+ tags_dict:
+ description: A dictionary representing the tags associated with the secret.
+ type: dict
+ returned: when the secret has tags
+ example: {'MyTagName': 'Some Value'}
+ version_added: 4.0.0
+'''
+
+from ansible.module_utils._text import to_bytes
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from traceback import format_exc
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+class Secret(object):
+ """An object representation of the Secret described by the self.module args"""
+ def __init__(
+ self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None,
+ tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None,
+ ):
+ self.name = name
+ self.description = description
+ self.replica_regions = replica_regions
+ self.kms_key_id = kms_key_id
+ if secret_type == "binary":
+ self.secret_type = "SecretBinary"
+ else:
+ self.secret_type = "SecretString"
+ self.secret = secret
+ self.resource_policy = resource_policy
+ self.tags = tags or {}
+ self.rotation_enabled = False
+ if lambda_arn:
+ self.rotation_enabled = True
+ self.rotation_lambda_arn = lambda_arn
+ self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}
+
+ @property
+ def create_args(self):
+ args = {
+ "Name": self.name
+ }
+ if self.description:
+ args["Description"] = self.description
+ if self.kms_key_id:
+ args["KmsKeyId"] = self.kms_key_id
+ if self.replica_regions:
+ add_replica_regions = []
+ for replica in self.replica_regions:
+ if replica["kms_key_id"]:
+ add_replica_regions.append({'Region': replica["region"],
+ 'KmsKeyId': replica["kms_key_id"]})
+ else:
+ add_replica_regions.append({'Region': replica["region"]})
+ args["AddReplicaRegions"] = add_replica_regions
+ if self.tags:
+ args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
+ args[self.secret_type] = self.secret
+ return args
+
+ @property
+ def update_args(self):
+ args = {
+ "SecretId": self.name
+ }
+ if self.description:
+ args["Description"] = self.description
+ if self.kms_key_id:
+ args["KmsKeyId"] = self.kms_key_id
+ args[self.secret_type] = self.secret
+ return args
+
+ @property
+ def secret_resource_policy_args(self):
+ args = {
+ "SecretId": self.name
+ }
+ if self.resource_policy:
+ args["ResourcePolicy"] = self.resource_policy
+ return args
+
+ @property
+ def boto3_tags(self):
+        return ansible_dict_to_boto3_tag_list(self.tags)
+
+ def as_dict(self):
+ result = self.__dict__
+ result.pop("tags")
+ return snake_dict_to_camel_dict(result)
+
+
+class SecretsManagerInterface(object):
+ """An interface with SecretsManager"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = self.module.client('secretsmanager')
+
+ def get_secret(self, name):
+ try:
+ secret = self.client.describe_secret(SecretId=name)
+ except self.client.exceptions.ResourceNotFoundException:
+ secret = None
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Failed to describe secret")
+ return secret
+
+ def get_resource_policy(self, name):
+ try:
+ resource_policy = self.client.get_resource_policy(SecretId=name)
+ except self.client.exceptions.ResourceNotFoundException:
+ resource_policy = None
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to get secret resource policy")
+ return resource_policy
+
+ def create_secret(self, secret):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ created_secret = self.client.create_secret(**secret.create_args)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create secret")
+
+ if secret.rotation_enabled:
+ response = self.update_rotation(secret)
+ created_secret["VersionId"] = response.get("VersionId")
+ return created_secret
+
+ def update_secret(self, secret):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ response = self.client.update_secret(**secret.update_args)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to update secret")
+ return response
+
+ def put_resource_policy(self, secret):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ json.loads(secret.secret_resource_policy_args.get("ResourcePolicy"))
+ except (TypeError, ValueError) as e:
+ self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc())
+
+ try:
+ response = self.client.put_resource_policy(**secret.secret_resource_policy_args)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to update secret resource policy")
+ return response
+
+ def remove_replication(self, name, regions):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ response = self.client.remove_regions_from_replication(
+ SecretId=name,
+ RemoveReplicaRegions=regions)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to replicate secret")
+ return response
+
+ def replicate_secret(self, name, regions):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ replica_regions = []
+ for replica in regions:
+ if replica["kms_key_id"]:
+ replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]})
+ else:
+ replica_regions.append({'Region': replica["region"]})
+ response = self.client.replicate_secret_to_regions(
+ SecretId=name,
+ AddReplicaRegions=replica_regions)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to replicate secret")
+ return response
+
+ def restore_secret(self, name):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ response = self.client.restore_secret(SecretId=name)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to restore secret")
+ return response
+
+ def delete_secret(self, name, recovery_window):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ if recovery_window == 0:
+ response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
+ else:
+ response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to delete secret")
+ return response
+
+ def delete_resource_policy(self, name):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ response = self.client.delete_resource_policy(SecretId=name)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to delete secret resource policy")
+ return response
+
+ def update_rotation(self, secret):
+ if secret.rotation_enabled:
+ try:
+ response = self.client.rotate_secret(
+ SecretId=secret.name,
+ RotationLambdaARN=secret.rotation_lambda_arn,
+ RotationRules=secret.rotation_rules)
+ except (BotoCoreError, ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to rotate secret")
+ else:
+ try:
+ response = self.client.cancel_rotate_secret(SecretId=secret.name)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to cancel rotation")
+ return response
+
+ def tag_secret(self, secret_name, tags):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ self.client.tag_resource(SecretId=secret_name, Tags=tags)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")
+
+ def untag_secret(self, secret_name, tag_keys):
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+ try:
+ self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")
+
+ def secrets_match(self, desired_secret, current_secret):
+ """Compare secrets except tags and rotation
+
+ Args:
+ desired_secret: camel dict representation of the desired secret state.
+ current_secret: secret reference as returned by the secretsmanager api.
+
+ Returns: bool
+ """
+ if desired_secret.description != current_secret.get("Description", ""):
+ return False
+ if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
+ return False
+ current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
+ if desired_secret.secret_type == 'SecretBinary':
+ desired_value = to_bytes(desired_secret.secret)
+ else:
+ desired_value = desired_secret.secret
+ if desired_value != current_secret_value.get(desired_secret.secret_type):
+ return False
+ return True
+
+
+def rotation_match(desired_secret, current_secret):
+ """Compare secrets rotation configuration
+
+ Args:
+ desired_secret: camel dict representation of the desired secret state.
+ current_secret: secret reference as returned by the secretsmanager api.
+
+ Returns: bool
+ """
+ if desired_secret.rotation_enabled != current_secret.get("RotationEnabled", False):
+ return False
+ if desired_secret.rotation_enabled:
+ if desired_secret.rotation_lambda_arn != current_secret.get("RotationLambdaARN"):
+ return False
+ if desired_secret.rotation_rules != current_secret.get("RotationRules"):
+ return False
+ return True
+
+
+def compare_regions(desired_secret, current_secret):
+ """Compare secrets replication configuration
+
+ Args:
+ desired_secret: camel dict representation of the desired secret state.
+ current_secret: secret reference as returned by the secretsmanager api.
+
+    Returns: tuple of (regions_to_set_replication, regions_to_remove_replication)
+ """
+ regions_to_set_replication = []
+ regions_to_remove_replication = []
+
+ if desired_secret.replica_regions is None:
+ return regions_to_set_replication, regions_to_remove_replication
+
+ if desired_secret.replica_regions:
+ regions_to_set_replication = desired_secret.replica_regions
+
+ for current_secret_region in current_secret.get("ReplicationStatus", []):
+ if regions_to_set_replication:
+ for desired_secret_region in regions_to_set_replication:
+ if current_secret_region["Region"] == desired_secret_region["region"]:
+ regions_to_set_replication.remove(desired_secret_region)
+ else:
+ regions_to_remove_replication.append(current_secret_region["Region"])
+ else:
+ regions_to_remove_replication.append(current_secret_region["Region"])
+
+ return regions_to_set_replication, regions_to_remove_replication
+
+
+def main():
+ replica_args = dict(
+ region=dict(type='str', required=True),
+ kms_key_id=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'name': dict(required=True),
+ 'state': dict(choices=['present', 'absent'], default='present'),
+ 'overwrite': dict(type='bool', default=True),
+ 'description': dict(default=""),
+ 'replica': dict(type='list', elements='dict', options=replica_args),
+ 'kms_key_id': dict(),
+ 'secret_type': dict(choices=['binary', 'string'], default="string"),
+ 'secret': dict(default="", no_log=True),
+ 'json_secret': dict(type='json', no_log=True),
+ 'resource_policy': dict(type='json', default=None),
+ 'tags': dict(type='dict', default=None, aliases=['resource_tags']),
+ 'purge_tags': dict(type='bool', default=True),
+ 'rotation_lambda': dict(),
+ 'rotation_interval': dict(type='int', default=30),
+ 'recovery_window': dict(type='int', default=30),
+ },
+ mutually_exclusive=[['secret', 'json_secret']],
+ supports_check_mode=True,
+ )
+
+ changed = False
+ state = module.params.get('state')
+ secrets_mgr = SecretsManagerInterface(module)
+ recovery_window = module.params.get('recovery_window')
+ secret = Secret(
+ module.params.get('name'),
+ module.params.get('secret_type'),
+ module.params.get('secret') or module.params.get('json_secret'),
+ description=module.params.get('description'),
+ replica_regions=module.params.get('replica'),
+ kms_key_id=module.params.get('kms_key_id'),
+ resource_policy=module.params.get('resource_policy'),
+ tags=module.params.get('tags'),
+ lambda_arn=module.params.get('rotation_lambda'),
+ rotation_interval=module.params.get('rotation_interval')
+ )
+ purge_tags = module.params.get('purge_tags')
+
+ current_secret = secrets_mgr.get_secret(secret.name)
+
+ if state == 'absent':
+ if current_secret:
+ if not current_secret.get("DeletedDate"):
+ result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ changed = True
+ elif current_secret.get("DeletedDate") and recovery_window == 0:
+ result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ changed = True
+ else:
+ result = "secret already scheduled for deletion"
+ else:
+ result = "secret does not exist"
+ if state == 'present':
+ if current_secret is None:
+ result = secrets_mgr.create_secret(secret)
+ if secret.resource_policy and result.get("ARN"):
+ result = secrets_mgr.put_resource_policy(secret)
+ changed = True
+ else:
+ # current_secret exists; decide what to do with it
+ if current_secret.get("DeletedDate"):
+ secrets_mgr.restore_secret(secret.name)
+ changed = True
+ if not secrets_mgr.secrets_match(secret, current_secret):
+ overwrite = module.params.get('overwrite')
+ if overwrite:
+ result = secrets_mgr.update_secret(secret)
+ changed = True
+ if not rotation_match(secret, current_secret):
+ result = secrets_mgr.update_rotation(secret)
+ changed = True
+
+ current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name)
+ current_resource_policy = current_resource_policy_response.get("ResourcePolicy")
+ if compare_policies(secret.resource_policy, current_resource_policy):
+ if secret.resource_policy is None and current_resource_policy:
+ result = secrets_mgr.delete_resource_policy(secret.name)
+ else:
+ result = secrets_mgr.put_resource_policy(secret)
+ changed = True
+
+ if module.params.get('tags') is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', []))
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags)
+ if tags_to_add:
+ secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
+ changed = True
+ if tags_to_remove:
+ secrets_mgr.untag_secret(secret.name, tags_to_remove)
+ changed = True
+
+ regions_to_set_replication, regions_to_remove_replication = compare_regions(secret, current_secret)
+ if regions_to_set_replication:
+ secrets_mgr.replicate_secret(secret.name, regions_to_set_replication)
+ changed = True
+ if regions_to_remove_replication:
+ secrets_mgr.remove_replication(secret.name, regions_to_remove_replication)
+ changed = True
+
+ result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
+ if result.get('tags', None) is not None:
+ result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', []))
+ result.pop("response_metadata")
+
+ module.exit_json(changed=changed, secret=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity.py b/ansible_collections/community/aws/plugins/modules/ses_identity.py
new file mode 100644
index 000000000..997692df6
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ses_identity.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ses_identity
+version_added: 1.0.0
+short_description: Manages SES email and domain identity
+description:
+ - This module allows the user to manage verified email and domain identity for SES.
+ - This covers verifying and removing identities as well as setting up complaint, bounce
+ and delivery notification settings.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity).
+ The usage did not change.
+author:
+ - Ed Costello (@orthanc)
+options:
+ identity:
+ description:
+ - This is the email address or domain to verify / delete.
+ - If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
+ required: true
+ type: str
+ state:
+ description: Whether to create(or update) or delete the identity.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ bounce_notifications:
+ description:
+ - Setup the SNS topic used to report bounce notifications.
+ - If omitted, bounce notifications will not be delivered to a SNS topic.
+ - If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+        default: false
+ type: dict
+ complaint_notifications:
+ description:
+ - Setup the SNS topic used to report complaint notifications.
+      - If omitted, complaint notifications will not be delivered to an SNS topic.
+      - If complaint notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+        default: false
+ type: dict
+ delivery_notifications:
+ description:
+ - Setup the SNS topic used to report delivery notifications.
+      - If omitted, delivery notifications will not be delivered to an SNS topic.
+ suboptions:
+ topic:
+ description:
+ - The ARN of the topic to send notifications to.
+          - If omitted, notifications will not be delivered to an SNS topic.
+ include_headers:
+ description:
+ - Whether or not to include headers when delivering to the SNS topic.
+ - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
+ type: bool
+        default: false
+ type: dict
+ feedback_forwarding:
+ description:
+ - Whether or not to enable feedback forwarding.
+ - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
+    type: bool
+    default: true
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Ensure example@example.com email identity exists
+ community.aws.ses_identity:
+ identity: example@example.com
+ state: present
+
+- name: Delete example@example.com email identity
+ community.aws.ses_identity:
+    identity: example@example.com
+ state: absent
+
+- name: Ensure example.com domain identity exists
+ community.aws.ses_identity:
+ identity: example.com
+ state: present
+
+# Create an SNS topic and send bounce and complaint notifications to it
+# instead of emailing the identity owner
+- name: Ensure complaints-topic exists
+ community.aws.sns_topic:
+ name: "complaints-topic"
+ state: present
+ purge_subscriptions: False
+ register: topic_info
+
+- name: Deliver feedback to topic instead of owner email
+ community.aws.ses_identity:
+ identity: example@example.com
+ state: present
+ complaint_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ include_headers: True
+ bounce_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+ include_headers: False
+ feedback_forwarding: False
+
+# Create an SNS topic for delivery notifications and leave complaints
+# being forwarded to the identity owner email
+- name: Ensure delivery-notifications-topic exists
+ community.aws.sns_topic:
+ name: "delivery-notifications-topic"
+ state: present
+ purge_subscriptions: False
+ register: topic_info
+
+- name: Delivery notifications to topic
+ community.aws.ses_identity:
+ identity: example@example.com
+ state: present
+ delivery_notifications:
+ topic: "{{ topic_info.sns_arn }}"
+'''
+
+RETURN = '''
+identity:
+ description: The identity being modified.
+ returned: success
+ type: str
+ sample: example@example.com
+identity_arn:
+ description: The arn of the identity being modified.
+ returned: success
+ type: str
+ sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
+verification_attributes:
+ description: The verification information for the identity.
+ returned: success
+ type: complex
+ sample: {
+ "verification_status": "Pending",
+ "verification_token": "...."
+ }
+ contains:
+ verification_status:
+ description: The verification status of the identity.
+ type: str
+ sample: "Pending"
+ verification_token:
+ description: The verification token for a domain identity.
+ type: str
+notification_attributes:
+ description: The notification setup for the identity.
+ returned: success
+ type: complex
+ sample: {
+ "bounce_topic": "arn:aws:sns:....",
+ "complaint_topic": "arn:aws:sns:....",
+ "delivery_topic": "arn:aws:sns:....",
+ "forwarding_enabled": false,
+ "headers_in_bounce_notifications_enabled": true,
+ "headers_in_complaint_notifications_enabled": true,
+ "headers_in_delivery_notifications_enabled": true
+ }
+ contains:
+ bounce_topic:
+ description:
+ - The ARN of the topic bounce notifications are delivered to.
+ - Omitted if bounce notifications are not delivered to a topic.
+ type: str
+ complaint_topic:
+ description:
+ - The ARN of the topic complaint notifications are delivered to.
+ - Omitted if complaint notifications are not delivered to a topic.
+ type: str
+ delivery_topic:
+ description:
+ - The ARN of the topic delivery notifications are delivered to.
+ - Omitted if delivery notifications are not delivered to a topic.
+ type: str
+ forwarding_enabled:
+ description: Whether or not feedback forwarding is enabled.
+ type: bool
+ headers_in_bounce_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the bounce topic.
+ type: bool
+ headers_in_complaint_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the complaint topic.
+ type: bool
+ headers_in_delivery_notifications_enabled:
+ description: Whether or not headers are included in messages delivered to the delivery topic.
+ type: bool
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+import time
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_verification_attributes(connection, module, identity, retries=0, retry_delay=10):
+    # Unpredictably, get_identity_verification_attributes doesn't include the identity even when we've
+    # just registered it. This is suspected to be an eventual consistency issue on the AWS side.
+    # We don't want this complexity exposed to users of the module, as they'd have to retry to ensure
+    # a consistent return from the module.
+    # To avoid this we have an internal retry that we use only after registering the identity.
+ for attempt in range(0, retries + 1):
+ try:
+ response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
+ identity_verification = response['VerificationAttributes']
+ if identity in identity_verification:
+ break
+        time.sleep(retry_delay)
+ if identity not in identity_verification:
+ return None
+ return identity_verification[identity]
+
+
+def get_identity_notifications(connection, module, identity, retries=0, retry_delay=10):
+    # Unpredictably, get_identity_notifications doesn't include the notifications when we've
+    # just registered the identity.
+    # We don't want this complexity exposed to users of the module, as they'd have to retry to ensure
+    # a consistent return from the module.
+    # To avoid this we have an internal retry that we use only when getting the current notification
+    # status for return.
+ for attempt in range(0, retries + 1):
+ try:
+ response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
+ notification_attributes = response['NotificationAttributes']
+
+    # No clear AWS docs on when this happens, but it appears sometimes identities are not included
+    # in the notification attributes when the identity is first registered. Suspect that this is caused by
+    # eventual consistency within the AWS services. It's been observed in builds, so we need to handle it.
+ #
+ # When this occurs, just return None and we'll assume no identity notification settings have been changed
+ # from the default which is reasonable if this is just eventual consistency on creation.
+ # See: https://github.com/ansible/ansible/issues/36065
+ if identity in notification_attributes:
+ break
+ else:
+ # Paranoia check for coding errors, we only requested one identity, so if we get a different one
+ # something has gone very wrong.
+ if len(notification_attributes) != 0:
+ module.fail_json(
+ msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
+ identity,
+ notification_attributes.keys(),
+ )
+ )
+            time.sleep(retry_delay)
+ if identity not in notification_attributes:
+ return None
+ return notification_attributes[identity]
+
+
+def desired_topic(module, notification_type):
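+    # Return the topic ARN desired for this notification type, or None when not configured.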
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ if arg_dict:
+ return arg_dict.get('topic', None)
+ else:
+ return None
+
+
+def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
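+    # Align the SES notification topic for one notification type with the module parameters.
+    # Returns True when a change was made (or would be made outside of check mode).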
+ topic_key = notification_type + 'Topic'
+ if identity_notifications is None:
+        # If there is no notification configuration at all, notifications cannot be being
+        # sent to topics, hence assume None as the current state.
+ current = None
+ elif topic_key in identity_notifications:
+ current = identity_notifications[topic_key]
+ else:
+        # If there is information on the notifications setup but no information on the
+        # particular notification topic, it's pretty safe to assume there's no topic for
+        # this notification. AWS API docs suggest this information will always be
+        # included, but best to be defensive.
+ current = None
+
+ required = desired_topic(module, notification_type)
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
+ identity=identity,
+ notification_type=notification_type,
+ ))
+ return True
+ return False
+
+
+def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
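+    # Align the include-headers flag for one notification type; returns True on change.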
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+ if identity_notifications is None:
+ # If there is no configuration for topic notifications, headers cannot be being
+ # forwarded, hence assume false.
+ current = False
+ elif header_key in identity_notifications:
+ current = identity_notifications[header_key]
+ else:
+        # The AWS API docs indicate that the headers-in fields are optional. Unfortunately
+        # it's not clear what this means, but it's a pretty safe assumption that it means
+        # headers are not included, since most API consumers would interpret absence as false.
+ current = False
+
+ if arg_dict is not None and 'include_headers' in arg_dict:
+ required = arg_dict['include_headers']
+ else:
+ required = False
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
+ aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
+ identity=identity,
+ notification_type=notification_type,
+ ))
+ return True
+ return False
+
+
+def update_feedback_forwarding(connection, module, identity, identity_notifications):
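+    # Align the feedback forwarding flag with the desired state; returns True on change.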
+ if identity_notifications is None:
+ # AWS requires feedback forwarding to be enabled unless bounces and complaints
+ # are being handled by SNS topics. So in the absence of identity_notifications
+ # information existing feedback forwarding must be on.
+ current = True
+ elif 'ForwardingEnabled' in identity_notifications:
+ current = identity_notifications['ForwardingEnabled']
+ else:
+        # If there is information on the notifications setup but no information on the
+        # forwarding state, it's pretty safe to assume forwarding is off. AWS API docs
+        # suggest this information will always be included, but best to be defensive.
+ current = False
+
+ required = module.params.get('feedback_forwarding')
+
+ if current != required:
+ try:
+ if not module.check_mode:
+ connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
+ return True
+ return False
+
+
+def create_mock_notifications_response(module):
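+    # Build the notification attributes AWS would be expected to return; used as the
+    # module result in check mode, where nothing is actually written.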
+ resp = {
+ "ForwardingEnabled": module.params.get('feedback_forwarding'),
+ }
+ for notification_type in ('Bounce', 'Complaint', 'Delivery'):
+ arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ if arg_dict is not None and 'topic' in arg_dict:
+ resp[notification_type + 'Topic'] = arg_dict['topic']
+
+ header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+ if arg_dict is not None and 'include_headers' in arg_dict:
+ resp[header_key] = arg_dict['include_headers']
+ else:
+ resp[header_key] = False
+ return resp
+
+
+def update_identity_notifications(connection, module):
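+    # Apply topic, header, and feedback-forwarding settings for the identity.
+    # Returns (changed, notification_attributes).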
+ identity = module.params.get('identity')
+ changed = False
+ identity_notifications = get_identity_notifications(connection, module, identity)
+
+ for notification_type in ('Bounce', 'Complaint', 'Delivery'):
+ changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
+ changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
+
+ changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
+
+ if changed or identity_notifications is None:
+ if module.check_mode:
+ identity_notifications = create_mock_notifications_response(module)
+ else:
+ identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
+ return changed, identity_notifications
+
+
+def validate_params_for_identity_present(module):
+ if module.params.get('feedback_forwarding') is False:
+ if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
+ module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
+ "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
+
+
+def create_or_update_identity(connection, module, region, account_id):
+ identity = module.params.get('identity')
+ changed = False
+ verification_attributes = get_verification_attributes(connection, module, identity)
+ if verification_attributes is None:
+ try:
+ if not module.check_mode:
+ if '@' in identity:
+ connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
+ else:
+ connection.verify_domain_identity(Domain=identity, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
+ if module.check_mode:
+ verification_attributes = {
+ "VerificationStatus": "Pending",
+ }
+ else:
+ verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
+ changed = True
+ elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
+ module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
+ verification_attributes=camel_dict_to_snake_dict(verification_attributes))
+
+ if verification_attributes is None:
+ module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
+
+ notifications_changed, notification_attributes = update_identity_notifications(connection, module)
+ changed |= notifications_changed
+
+ if notification_attributes is None:
+ module.fail_json(msg='Unable to load identity notification attributes.')
+
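+    # NOTE: this assumes the standard "aws" partition; partitions such as
+    # "aws-us-gov" (GovCloud) and "aws-cn" (China) use a different ARN prefix.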
+ identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
+
+ module.exit_json(
+ changed=changed,
+ identity=identity,
+ identity_arn=identity_arn,
+ verification_attributes=camel_dict_to_snake_dict(verification_attributes),
+ notification_attributes=camel_dict_to_snake_dict(notification_attributes),
+ )
+
+
+def destroy_identity(connection, module):
+ identity = module.params.get('identity')
+ changed = False
+ verification_attributes = get_verification_attributes(connection, module, identity)
+ if verification_attributes is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_identity(Identity=identity, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
+ changed = True
+
+ module.exit_json(
+ changed=changed,
+ identity=identity,
+ )
+
+
+def get_account_id(module):
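+    # Resolve the AWS account ID via STS GetCallerIdentity; used to build the identity ARN.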
+ sts = module.client('sts')
+ try:
+ caller_identity = sts.get_caller_identity()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve caller identity')
+ return caller_identity['Account']
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ "identity": dict(required=True, type='str'),
+ "state": dict(default='present', choices=['present', 'absent']),
+ "bounce_notifications": dict(type='dict'),
+ "complaint_notifications": dict(type='dict'),
+ "delivery_notifications": dict(type='dict'),
+ "feedback_forwarding": dict(default=True, type='bool'),
+ },
+ supports_check_mode=True,
+ )
+
+ for notification_type in ('bounce', 'complaint', 'delivery'):
+ param_name = notification_type + '_notifications'
+ arg_dict = module.params.get(param_name)
+ if arg_dict:
+ extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
+ if extra_keys:
+ module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
+
+    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to all SES calls.
+ connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ region = module.region
+ account_id = get_account_id(module)
+ validate_params_for_identity_present(module)
+ create_or_update_identity(connection, module, region, account_id)
+ else:
+ destroy_identity(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py
new file mode 100644
index 000000000..16d9f1ded
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ses_identity_policy
+version_added: 1.0.0
+short_description: Manages SES sending authorization policies
+description:
+ - This module allows the user to manage sending authorization policies associated with an SES
+ identity (email or domain).
+  - SES sending authorization policies can be used to control which actors are able to send email
+ on behalf of the validated identity and what conditions must be met by the sent emails.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity_policy).
+ The usage did not change.
+author:
+ - Ed Costello (@orthanc)
+
+options:
+ identity:
+ description: |
+ The SES identity to attach or remove a policy from. This can be either the full ARN or just
+ the verified email or domain.
+ required: true
+ type: str
+ policy_name:
+ description: The name used to identify the policy within the scope of the identity it's attached to.
+ required: true
+ type: str
+ policy:
+ description: A properly formatted JSON sending authorization policy. Required when I(state=present).
+ type: json
+ state:
+    description: Whether to create (or update) or delete the authorization policy on the identity.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: add sending authorization policy to domain identity
+ community.aws.ses_identity_policy:
+ identity: example.com
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: add sending authorization policy to email identity
+ community.aws.ses_identity_policy:
+ identity: example@example.com
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: add sending authorization policy to identity using ARN
+ community.aws.ses_identity_policy:
+ identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
+ policy_name: ExamplePolicy
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ state: present
+
+- name: remove sending authorization policy
+ community.aws.ses_identity_policy:
+ identity: example.com
+ policy_name: ExamplePolicy
+ state: absent
+'''
+
+RETURN = '''
+policies:
+ description: A list of all policies present on the identity after the operation.
+ returned: success
+ type: list
+ sample: [ExamplePolicy]
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry
+
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def get_identity_policy(connection, module, identity, policy_name):
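+    # Fetch a single named sending authorization policy; returns the policy document,
+    # or None when no policy of that name is attached to the identity.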
+ try:
+ response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
+ policies = response['Policies']
+ if policy_name in policies:
+ return policies[policy_name]
+ return None
+
+
+def create_or_update_identity_policy(connection, module):
+ identity = module.params.get('identity')
+ policy_name = module.params.get('policy_name')
+ required_policy = module.params.get('policy')
+ required_policy_dict = json.loads(required_policy)
+
+ changed = False
+ policy = get_identity_policy(connection, module, identity, policy_name)
+ policy_dict = json.loads(policy) if policy else None
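+    # compare_policies() returns True when the policies differ, i.e. an update is needed.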
+ if compare_policies(policy_dict, required_policy_dict):
+ changed = True
+ try:
+ if not module.check_mode:
+ connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
+
+ # Load the list of applied policies to include in the response.
+ # In principle we should be able to just return the response, but given
+ # eventual consistency behaviours in AWS it's plausible that we could
+ # end up with a list that doesn't contain the policy we just added.
+ # So out of paranoia check for this case and if we're missing the policy
+ # just make sure it's present.
+ #
+ # As a nice side benefit this also means the return is correct in check mode
+ try:
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to list identity policies')
+ if policy_name is not None and policy_name not in policies_present:
+ policies_present = list(policies_present)
+ policies_present.append(policy_name)
+ module.exit_json(
+ changed=changed,
+ policies=policies_present,
+ )
+
+
+def delete_identity_policy(connection, module):
+ identity = module.params.get('identity')
+ policy_name = module.params.get('policy_name')
+
+ changed = False
+ try:
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to list identity policies')
+ if policy_name in policies_present:
+ try:
+ if not module.check_mode:
+ connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
+ changed = True
+ policies_present = list(policies_present)
+ policies_present.remove(policy_name)
+
+ module.exit_json(
+ changed=changed,
+ policies=policies_present,
+ )
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={
+ 'identity': dict(required=True, type='str'),
+ 'state': dict(default='present', choices=['present', 'absent']),
+ 'policy_name': dict(required=True, type='str'),
+ 'policy': dict(type='json', default=None),
+ },
+ required_if=[['state', 'present', ['policy']]],
+ supports_check_mode=True,
+ )
+
+    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to all SES calls.
+ connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_identity_policy(connection, module)
+ else:
+ delete_identity_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ses_rule_set.py b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py
new file mode 100644
index 000000000..b42ac8088
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ses_rule_set
+version_added: 1.0.0
+short_description: Manages SES inbound receipt rule sets
+description:
+  - This module allows you to create, delete, and manage SES receipt rule sets.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_rule_set).
+ The usage did not change.
+author:
+ - "Ben Tomasik (@tomislacker)"
+ - "Ed Costello (@orthanc)"
+options:
+ name:
+ description:
+ - The name of the receipt rule set.
+ required: True
+ type: str
+ state:
+ description:
+ - Whether to create (or update) or destroy the receipt rule set.
+ required: False
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ active:
+ description:
+ - Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
+ - If omitted, the active rule set will not be changed.
+ - If C(True) then this rule set will be made active and all others inactive.
+      - If C(False) then this rule set will be deactivated. Be careful with this, as you can end up with no active rule set.
+ type: bool
+ required: False
+ force:
+ description:
+ - When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
+ type: bool
+ required: False
+ default: False
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+---
+- name: Create default rule set and activate it if not already
+ community.aws.ses_rule_set:
+ name: default-rule-set
+ state: present
+ active: true
+
+- name: Create some arbitrary rule set but do not activate it
+ community.aws.ses_rule_set:
+ name: arbitrary-rule-set
+ state: present
+
+- name: Explicitly deactivate the default rule set leaving no active rule set
+ community.aws.ses_rule_set:
+ name: default-rule-set
+ state: present
+ active: false
+
+- name: Remove an arbitrary inactive rule set
+ community.aws.ses_rule_set:
+ name: arbitrary-rule-set
+ state: absent
+
+- name: Remove a rule set even if we have to first deactivate it to remove it
+ community.aws.ses_rule_set:
+ name: default-rule-set
+ state: absent
+ force: true
+"""
+
+RETURN = """
+active:
+  description: Whether the SES rule set is active.
+ returned: success if I(state) is C(present)
+ type: bool
+ sample: true
+rule_sets:
+ description: The list of SES receipt rule sets that exist after any changes.
+ returned: success
+ type: list
+ sample: [{
+ "created_timestamp": "2018-02-25T01:20:32.690000+00:00",
+ "name": "default-rule-set"
+ }]
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def list_rule_sets(client, module):
+ try:
+ response = client.list_receipt_rule_sets(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't list rule sets.")
+ return response['RuleSets']
+
+
+def rule_set_in(name, rule_sets):
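+    # Return True when a rule set with the given name appears in the listing.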
+ return any(s for s in rule_sets if s['Name'] == name)
+
+
+def ruleset_active(client, module, name):
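+    # Return True when the named rule set is currently the active receipt rule set.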
+ try:
+ active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get the active rule set.")
+ if active_rule_set is not None and 'Metadata' in active_rule_set:
+ return name == active_rule_set['Metadata']['Name']
+ else:
+ # Metadata was not set meaning there is no active rule set
+ return False
+
+
+def deactivate_rule_set(client, module):
+ try:
+        # Calling without a RuleSetName deactivates the currently active rule set.
+ client.set_active_receipt_rule_set(aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't set active rule set to None.")
+
+
+def update_active_rule_set(client, module, name, desired_active):
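+    # Activate or deactivate the named rule set to match desired_active (None leaves
+    # the active rule set untouched); returns (changed, active).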
+ check_mode = module.check_mode
+
+ active = ruleset_active(client, module, name)
+
+ changed = False
+ if desired_active is not None:
+ if desired_active and not active:
+ if not check_mode:
+ try:
+ client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
+ changed = True
+ active = True
+ elif not desired_active and active:
+ if not check_mode:
+ deactivate_rule_set(client, module)
+ changed = True
+ active = False
+ return changed, active
+
+
+def create_or_update_rule_set(client, module):
+ name = module.params.get('name')
+ check_mode = module.check_mode
+ changed = False
+
+ rule_sets = list_rule_sets(client, module)
+ if not rule_set_in(name, rule_sets):
+ if not check_mode:
+ try:
+ client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
+ changed = True
+ rule_sets = list(rule_sets)
+ rule_sets.append({
+ 'Name': name,
+ })
+
+ (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active'))
+ changed |= active_changed
+
+ module.exit_json(
+ changed=changed,
+ active=active,
+ rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
+ )
+
+
+def remove_rule_set(client, module):
+ name = module.params.get('name')
+ check_mode = module.check_mode
+ changed = False
+
+ rule_sets = list_rule_sets(client, module)
+ if rule_set_in(name, rule_sets):
+ active = ruleset_active(client, module, name)
+ if active and not module.params.get('force'):
+ module.fail_json(
+ msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
+ error={
+ "code": "CannotDelete",
+ "message": "Cannot delete active rule set: {0}".format(name),
+ }
+ )
+ if not check_mode:
+ if active and module.params.get('force'):
+ deactivate_rule_set(client, module)
+ try:
+ client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))
+ changed = True
+ rule_sets = [x for x in rule_sets if x['Name'] != name]
+
+ module.exit_json(
+ changed=changed,
+ rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ active=dict(type='bool'),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ state = module.params.get('state')
+
+    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
+    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
+    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
+    # failures, so apply a jittered backoff to all SES calls.
+ client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+
+ if state == 'absent':
+ remove_rule_set(client, module)
+ else:
+ create_or_update_rule_set(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sns.py b/ansible_collections/community/aws/plugins/modules/sns.py
new file mode 100644
index 000000000..f72bbfa49
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sns.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: sns
+short_description: Send Amazon Simple Notification Service messages
+version_added: 1.0.0
+description:
+ - Sends a notification to a topic on your Amazon SNS account.
+author:
+ - Michael J. Schultz (@mjschultz)
+ - Paul Arthur (@flowerysong)
+options:
+ msg:
+ description:
+ - Default message for subscriptions without a more specific message.
+ required: true
+ aliases: [ "default" ]
+ type: str
+ subject:
+ description:
+      - Message subject.
+ type: str
+ topic:
+ description:
+ - The name or ARN of the topic to publish to.
+ required: true
+ type: str
+ email:
+ description:
+ - Message to send to email subscriptions.
+ type: str
+ email_json:
+ description:
+ - Message to send to email-json subscriptions.
+ type: str
+ sqs:
+ description:
+ - Message to send to SQS subscriptions.
+ type: str
+ sms:
+ description:
+ - Message to send to SMS subscriptions.
+ type: str
+ http:
+ description:
+ - Message to send to HTTP subscriptions.
+ type: str
+ https:
+ description:
+ - Message to send to HTTPS subscriptions.
+ type: str
+ application:
+ description:
+ - Message to send to application subscriptions.
+ type: str
+ lambda:
+ description:
+ - Message to send to Lambda subscriptions.
+ type: str
+ message_attributes:
+ description:
+ - Dictionary of message attributes. These are optional structured data entries to be sent along to the endpoint.
+ - This is in AWS's distinct Name/Type/Value format; see example below.
+ type: dict
+ message_structure:
+ description:
+ - The payload format to use for the message.
+ - This must be C(json) to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)).
+ - It must be C(string) to support I(message_attributes).
+ default: json
+ choices: ['json', 'string']
+ type: str
+ message_group_id:
+ description:
+ - A tag which is used to process messages that belong to the same group in a FIFO manner.
+      - Has to be included when publishing a message to a FIFO topic.
+ - Can contain up to 128 alphanumeric characters and punctuation.
+ type: str
+ version_added: 5.4.0
+ message_deduplication_id:
+ description:
+      - Only used in connection with I(message_group_id).
+      - Overwrites the auto-generated MessageDeduplicationId.
+      - Can contain up to 128 alphanumeric characters and punctuation.
+      - Messages with the same deduplication ID are recognized as the same message.
+      - Gets overwritten by an auto-generated token if the topic has ContentBasedDeduplication set.
+ type: str
+ version_added: 5.4.0
+
+extends_documentation_fragment:
+- amazon.aws.ec2
+- amazon.aws.aws
+- amazon.aws.boto3
+'''
+
+EXAMPLES = """
+- name: Send default notification message via SNS
+ community.aws.sns:
+ msg: '{{ inventory_hostname }} has completed the play.'
+ subject: Deploy complete!
+ topic: deploy
+ delegate_to: localhost
+
+- name: Send notification messages via SNS with short message for SMS
+ community.aws.sns:
+ msg: '{{ inventory_hostname }} has completed the play.'
+ sms: deployed!
+ subject: Deploy complete!
+ topic: deploy
+ delegate_to: localhost
+
+- name: Send message with message_attributes
+ community.aws.sns:
+ topic: "deploy"
+ msg: "message with extra details!"
+ message_attributes:
+ channel:
+ data_type: String
+ string_value: "mychannel"
+ color:
+ data_type: String
+ string_value: "green"
+ delegate_to: localhost
+
+- name: Send message to a fifo topic
+ community.aws.sns:
+ topic: "deploy"
+ msg: "Message with message group id"
+ subject: Deploy complete!
+ message_group_id: "deploy-1"
+ delegate_to: localhost
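+
+# A minimal sketch of publishing to a FIFO topic with an explicit deduplication ID;
+# the topic name and IDs below are placeholders.
+- name: Send message with an explicit deduplication ID to a FIFO topic
+  community.aws.sns:
+    topic: "deploy.fifo"
+    msg: "Build 42 finished"
+    message_group_id: "deploy-1"
+    message_deduplication_id: "build-42"
+  delegate_to: localhost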
+"""
+
+RETURN = """
+msg:
+  description: Human-readable diagnostic information.
+ returned: always
+ type: str
+ sample: OK
+message_id:
+  description: The message ID of the submitted message.
+  returned: on success
+  type: str
+  sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b
+sequence_number:
+  description: A 128-bit-long sequence number assigned to messages published to FIFO topics.
+  returned: on success
+  type: str
+"""
+
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
+
+
+def main():
+ protocols = [
+ 'http',
+ 'https',
+ 'email',
+ 'email_json',
+ 'sms',
+ 'sqs',
+ 'application',
+ 'lambda',
+ ]
+
+ argument_spec = dict(
+ msg=dict(required=True, aliases=['default']),
+ subject=dict(),
+ topic=dict(required=True),
+ message_attributes=dict(type='dict'),
+ message_structure=dict(choices=['json', 'string'], default='json'),
+ message_group_id=dict(),
+ message_deduplication_id=dict(),
+ )
+
+ for p in protocols:
+ argument_spec[p] = dict()
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ sns_kwargs = dict(
+ Message=module.params['msg'],
+ Subject=module.params['subject'],
+ MessageStructure=module.params['message_structure'],
+ )
+
+ if module.params['message_attributes']:
+ if module.params['message_structure'] != 'string':
+ module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
+ sns_kwargs['MessageAttributes'] = module.params['message_attributes']
+
+ if module.params["message_group_id"]:
+ sns_kwargs["MessageGroupId"] = module.params["message_group_id"]
+ if module.params["message_deduplication_id"]:
+ sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"]
+
+ dict_msg = {
+ 'default': sns_kwargs['Message']
+ }
+
+ for p in protocols:
+ if module.params[p]:
+ if sns_kwargs['MessageStructure'] != 'json':
+ module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
+ dict_msg[p.replace('_', '-')] = module.params[p]
+
+ client = module.client('sns')
+
+ topic = module.params['topic']
+ if ':' in topic:
+ # Short names can't contain ':' so we'll assume this is the full ARN
+ sns_kwargs['TopicArn'] = topic
+ else:
+ sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic)
+
+ if not sns_kwargs['TopicArn']:
+ module.fail_json(msg='Could not find topic: {0}'.format(topic))
+
+ if sns_kwargs['MessageStructure'] == 'json':
+ sns_kwargs['Message'] = json.dumps(dict_msg)
+
+ try:
+ result = client.publish(**sns_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to publish message')
+
+ sns_result = dict(msg="OK", message_id=result["MessageId"])
+
+ if module.params["message_group_id"]:
+ sns_result["sequence_number"] = result["SequenceNumber"]
+
+ module.exit_json(**sns_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic.py b/ansible_collections/community/aws/plugins/modules/sns_topic.py
new file mode 100644
index 000000000..3c05be004
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sns_topic.py
@@ -0,0 +1,697 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: sns_topic
+short_description: Manages AWS SNS topics and subscriptions
+version_added: 1.0.0
+description:
+ - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
+author:
+ - "Joel Thompson (@joelthompson)"
+ - "Fernando Jose Pando (@nand0p)"
+ - "Will Thames (@willthames)"
+options:
+ name:
+ description:
+ - The name or ARN of the SNS topic to manage.
+ required: true
+ type: str
+ topic_type:
+ description:
+      - The type of topic that should be created. Either C(standard) or C(fifo) (first-in, first-out).
+      - Some regions, including GovCloud regions, do not support FIFO topics.
+        Use the default value of C(standard) or omit the option if the region
+        does not support FIFO topics.
+ choices: ["standard", "fifo"]
+ default: 'standard'
+ type: str
+ version_added: 2.0.0
+ state:
+ description:
+ - Whether to create or destroy an SNS topic.
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ display_name:
+ description:
+ - Display name of the topic.
+ type: str
+ policy:
+ description:
+ - Policy to apply to the SNS topic.
+ - Policy body can be YAML or JSON.
+      - This is required for certain use cases, for example with S3 bucket notifications.
+ type: dict
+ delivery_policy:
+ description:
+ - Delivery policy to apply to the SNS topic.
+ type: dict
+ suboptions:
+ http:
+ description:
+ - Delivery policy for HTTP(S) messages.
+ - See U(https://docs.aws.amazon.com/sns/latest/dg/sns-message-delivery-retries.html)
+ for more information.
+ type: dict
+ required: false
+ suboptions:
+ disableSubscriptionOverrides:
+ description:
+ - Applies this policy to all subscriptions, even if they have their own policies.
+ type: bool
+ required: false
+ defaultThrottlePolicy:
+ description:
+              - Throttle the rate of messages sent to subscriptions.
+ type: dict
+ suboptions:
+ maxReceivesPerSecond:
+ description:
+ - The maximum number of deliveries per second per subscription.
+ type: int
+ required: true
+ required: false
+ defaultHealthyRetryPolicy:
+ description:
+ - Retry policy for HTTP(S) messages.
+ type: dict
+ required: true
+ suboptions:
+ minDelayTarget:
+ description:
+ - The minimum delay for a retry.
+ type: int
+ required: true
+ maxDelayTarget:
+ description:
+ - The maximum delay for a retry.
+ type: int
+ required: true
+ numRetries:
+ description:
+ - The total number of retries.
+ type: int
+ required: true
+ numMaxDelayRetries:
+ description:
+ - The number of retries with the maximum delay between them.
+ type: int
+ required: true
+ numMinDelayRetries:
+ description:
+ - The number of retries with just the minimum delay between them.
+ type: int
+ required: true
+ numNoDelayRetries:
+ description:
+                  - The number of retries to be performed immediately.
+ type: int
+ required: true
+ backoffFunction:
+ description:
+ - The function for backoff between retries.
+ type: str
+ required: true
+ choices: ['arithmetic', 'exponential', 'geometric', 'linear']
+ subscriptions:
+ description:
+ - List of subscriptions to apply to the topic. Note that AWS requires
+ subscriptions to be confirmed, so you will need to confirm any new
+ subscriptions.
+ suboptions:
+ endpoint:
+ description: Endpoint of subscription.
+ required: true
+ protocol:
+ description: Protocol of subscription.
+ required: true
+ attributes:
+        description: Attributes of subscription. Only supports RawMessageDelivery for SQS endpoints.
+ default: {}
+ version_added: "4.1.0"
+ type: list
+ elements: dict
+ default: []
+ purge_subscriptions:
+ description:
+ - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
+ allow you to purge any PendingConfirmation subscriptions, so if any
+ exist and would be purged, they are silently skipped. This means that
+ somebody could come back later and confirm the subscription. Sorry.
+ Blame Amazon."
+ default: true
+ type: bool
+ content_based_deduplication:
+ description:
+ - Whether to enable content-based deduplication for this topic.
+ - Ignored unless I(topic_type=fifo).
+ - Defaults to C(disabled).
+ choices: ["disabled", "enabled"]
+ type: str
+ version_added: 5.3.0
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 5.3.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r"""
+
+- name: Create alarm SNS topic
+ community.aws.sns_topic:
+ name: "alarms"
+ state: present
+ display_name: "alarm SNS topic"
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 2
+ maxDelayTarget: 4
+ numRetries: 9
+ numMaxDelayRetries: 5
+ numMinDelayRetries: 2
+ numNoDelayRetries: 2
+ backoffFunction: "linear"
+ disableSubscriptionOverrides: True
+ defaultThrottlePolicy:
+ maxReceivesPerSecond: 10
+ subscriptions:
+ - endpoint: "my_email_address@example.com"
+ protocol: "email"
+ - endpoint: "my_mobile_number"
+ protocol: "sms"
+
+- name: Create a topic permitting S3 bucket notifications
+ community.aws.sns_topic:
+ name: "S3Notifications"
+ state: present
+ display_name: "S3 notifications SNS topic"
+ policy:
+ Id: s3-topic-policy
+ Version: 2012-10-17
+ Statement:
+ - Sid: Statement-id
+ Effect: Allow
+ Resource: "arn:aws:sns:*:*:S3Notifications"
+ Principal:
+ Service: s3.amazonaws.com
+ Action: sns:Publish
+ Condition:
+ ArnLike:
+ aws:SourceArn: "arn:aws:s3:*:*:SomeBucket"
+
+- name: Example deleting a topic
+ community.aws.sns_topic:
+ name: "ExampleTopic"
+ state: absent
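+
+# A minimal sketch of a FIFO topic; the topic name is a placeholder.
+- name: Create a FIFO topic with content-based deduplication
+  community.aws.sns_topic:
+    name: "orders"
+    topic_type: fifo
+    content_based_deduplication: enabled
+    state: present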
+"""
+
+RETURN = r'''
+sns_arn:
+ description: The ARN of the topic you are modifying
+ type: str
+ returned: always
+ sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name"
+sns_topic:
+ description: Dict of sns topic details
+ type: complex
+ returned: always
+ contains:
+ attributes_set:
+ description: list of attributes set during this run
+ returned: always
+ type: list
+ sample: []
+ check_mode:
+ description: whether check mode was on
+ returned: always
+ type: bool
+ sample: false
+ content_based_deduplication:
+ description: Whether or not content_based_deduplication was set
+ returned: always
+ type: str
+ sample: disabled
+ version_added: 5.3.0
+ delivery_policy:
+ description: Delivery policy for the SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
+ "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
+ display_name:
+ description: Display name for SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: My topic name
+ name:
+ description: Topic name
+ returned: always
+ type: str
+ sample: ansible-test-dummy-topic
+ owner:
+ description: AWS account that owns the topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '123456789012'
+ policy:
+ description: Policy for the SNS topic
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},
+ "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
+ state:
+ description: whether the topic is present or absent
+ returned: always
+ type: str
+ sample: present
+ subscriptions:
+ description: List of subscribers to the topic in this AWS account
+ returned: always
+ type: list
+ sample: []
+ subscriptions_added:
+ description: List of subscribers added in this run
+ returned: always
+ type: list
+ sample: []
+ subscriptions_confirmed:
+ description: Count of confirmed subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_deleted:
+ description: Count of deleted subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_existing:
+ description: List of existing subscriptions
+ returned: always
+ type: list
+ sample: []
+ subscriptions_new:
+ description: List of new subscriptions
+ returned: always
+ type: list
+ sample: []
+ subscriptions_pending:
+ description: Count of pending subscriptions
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_purge:
+ description: Whether or not purge_subscriptions was set
+ returned: always
+ type: bool
+ sample: true
+ topic_arn:
+ description: ARN of the SNS topic (equivalent to sns_arn)
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic
+ topic_created:
+ description: Whether the topic was created
+ returned: always
+ type: bool
+ sample: false
+ topic_deleted:
+ description: Whether the topic was deleted
+ returned: always
+ type: bool
+ sample: false
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
+from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
+from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions
+from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint
+from ansible_collections.community.aws.plugins.module_utils.sns import get_info
+from ansible_collections.community.aws.plugins.module_utils.sns import update_tags
+
+
+class SnsTopicManager(object):
+ """ Handles SNS Topic creation and destruction """
+
+ def __init__(self,
+ module,
+ name,
+ topic_type,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ tags,
+ purge_tags,
+ content_based_deduplication,
+ check_mode):
+
+ self.connection = module.client('sns')
+ self.module = module
+ self.name = name
+ self.topic_type = topic_type
+ self.state = state
+ self.display_name = display_name
+ self.policy = policy
+ self.delivery_policy = scrub_none_parameters(delivery_policy) if delivery_policy else None
+ self.subscriptions = subscriptions
+ self.subscriptions_existing = []
+ self.subscriptions_deleted = []
+ self.subscriptions_added = []
+ self.subscriptions_attributes_set = []
+ self.desired_subscription_attributes = dict()
+ self.purge_subscriptions = purge_subscriptions
+ self.content_based_deduplication = content_based_deduplication
+ self.check_mode = check_mode
+ self.topic_created = False
+ self.topic_deleted = False
+ self.topic_arn = None
+ self.attributes_set = []
+ self.tags = tags
+ self.purge_tags = purge_tags
+
+ def _create_topic(self):
+ attributes = {}
+ tags = []
+
+ # NOTE: Never set FifoTopic = False. Some regions (including GovCloud)
+ # don't support the attribute being set, even to False.
+ if self.topic_type == 'fifo':
+ attributes['FifoTopic'] = 'true'
+ if not self.name.endswith('.fifo'):
+ self.name = self.name + '.fifo'
+
+ if self.tags:
+ tags = ansible_dict_to_boto3_tag_list(self.tags)
+
+ if not self.check_mode:
+ try:
+ response = self.connection.create_topic(Name=self.name,
+ Attributes=attributes,
+ Tags=tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
+ self.topic_arn = response['TopicArn']
+ return True
+
+ def _set_topic_attrs(self):
+ changed = False
+ try:
+ topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
+
+ if self.display_name and self.display_name != topic_attributes['DisplayName']:
+ changed = True
+ self.attributes_set.append('display_name')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
+ AttributeValue=self.display_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set display name")
+
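+        # compare_policies() returns True when the current and desired policies differ.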
+ if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
+ changed = True
+ self.attributes_set.append('policy')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
+ AttributeValue=json.dumps(self.policy))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set topic policy")
+
+ # Set content-based deduplication attribute. Ignore if topic_type is not fifo.
+ if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \
+ self.content_based_deduplication:
+ enabled = "true" if self.content_based_deduplication in 'enabled' else "false"
+ if enabled != topic_attributes['ContentBasedDeduplication']:
+ changed = True
+ self.attributes_set.append('content_based_deduplication')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication',
+ AttributeValue=enabled)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication")
+
+ if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
+ compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
+ changed = True
+ self.attributes_set.append('delivery_policy')
+ if not self.check_mode:
+ try:
+ self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
+ AttributeValue=json.dumps(self.delivery_policy))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
+ return changed
+
+ def _set_topic_subs(self):
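+        # Reconcile the requested subscriptions against those currently on
+        # the topic: purge unexpected ones (when purge_subscriptions is true)
+        # and subscribe any that are missing. Endpoints are canonicalized
+        # first so that equivalent endpoints compare equal.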
+ changed = False
+ subscriptions_existing_list = set()
+ desired_subscriptions = [(sub['protocol'],
+ canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
+ self.subscriptions]
+
+ for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn):
+ sub_key = (sub['Protocol'], sub['Endpoint'])
+ subscriptions_existing_list.add(sub_key)
+ if (self.purge_subscriptions and sub_key not in desired_subscriptions and
+ sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
+ changed = True
+ self.subscriptions_deleted.append(sub_key)
+ if not self.check_mode:
+ try:
+ self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
+
+ for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
+ changed = True
+ self.subscriptions_added.append((protocol, endpoint))
+ if not self.check_mode:
+ try:
+ self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
+ return changed
+
+ def _init_desired_subscription_attributes(self):
+ for sub in self.subscriptions:
+ sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint']))
+ tmp_dict = sub.get('attributes', {})
+ # aws sdk expects values to be strings
+ # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes
+ for k, v in tmp_dict.items():
+ tmp_dict[k] = str(v)
+
+ self.desired_subscription_attributes[sub_key] = tmp_dict
+
+ def _set_topic_subs_attributes(self):
+ changed = False
+ for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn):
+ sub_key = (sub['Protocol'], sub['Endpoint'])
+ sub_arn = sub['SubscriptionArn']
+ if not self.desired_subscription_attributes.get(sub_key):
+ # subscription attributes aren't defined in desired, skipping
+ continue
+
+ try:
+ sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn)
+
+ raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery')
+ if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes:
+ if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower():
+ changed = True
+ if not self.check_mode:
+ try:
+ self.connection.set_subscription_attributes(SubscriptionArn=sub_arn,
+ AttributeName='RawMessageDelivery',
+ AttributeValue=raw_message)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute")
+
+ return changed
+
+ def _delete_subscriptions(self):
+ # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
+ # https://forums.aws.amazon.com/thread.jspa?threadID=85993
+ subscriptions = list_topic_subscriptions(self.connection, self.module, self.topic_arn)
+ if not subscriptions:
+ return False
+ for sub in subscriptions:
+ if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
+ self.subscriptions_deleted.append(sub['SubscriptionArn'])
+ if not self.check_mode:
+ try:
+ self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
+ return True
+
+ def _delete_topic(self):
+ self.topic_deleted = True
+ if not self.check_mode:
+ try:
+ self.connection.delete_topic(TopicArn=self.topic_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
+ return True
+
+ def _name_is_arn(self):
+ return self.name.startswith('arn:')
+
+ def ensure_ok(self):
+ changed = False
+ self.populate_topic_arn()
+ if not self.topic_arn:
+ changed = self._create_topic()
+ if self.topic_arn in list_topics(self.connection, self.module):
+ changed |= self._set_topic_attrs()
+ elif self.display_name or self.policy or self.delivery_policy:
+ self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
+ changed |= self._set_topic_subs()
+ self._init_desired_subscription_attributes()
+ if self.topic_arn in list_topics(self.connection, self.module):
+ changed |= self._set_topic_subs_attributes()
+ elif any(self.desired_subscription_attributes.values()):
+ self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account")
+ # Check tagging
+ changed |= update_tags(self.connection, self.module, self.topic_arn)
+
+ return changed
+
+ def ensure_gone(self):
+ changed = False
+ self.populate_topic_arn()
+ if self.topic_arn:
+ if self.topic_arn not in list_topics(self.connection, self.module):
+ self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
+ changed = self._delete_subscriptions()
+ changed |= self._delete_topic()
+ return changed
+
+ def populate_topic_arn(self):
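+        # Resolve self.name to a topic ARN: if the name already looks like an
+        # ARN use it directly, otherwise look it up by name (appending the
+        # mandatory ".fifo" suffix for FIFO topics first).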
+ if self._name_is_arn():
+ self.topic_arn = self.name
+ return
+
+ name = self.name
+ if self.topic_type == 'fifo' and not name.endswith('.fifo'):
+ name += ".fifo"
+ self.topic_arn = topic_arn_lookup(self.connection, self.module, name)
+
+
+def main():
+    # We're kinda stuck with CamelCase here; it would be nice to switch to
+    # snake_case, but we'd need to purge out the alias entries.
+ http_retry_args = dict(
+ minDelayTarget=dict(type='int', required=True),
+ maxDelayTarget=dict(type='int', required=True),
+ numRetries=dict(type='int', required=True),
+ numMaxDelayRetries=dict(type='int', required=True),
+ numMinDelayRetries=dict(type='int', required=True),
+ numNoDelayRetries=dict(type='int', required=True),
+ backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']),
+ )
+ http_delivery_args = dict(
+ defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args),
+ disableSubscriptionOverrides=dict(type='bool', required=False),
+ defaultThrottlePolicy=dict(
+ type='dict', required=False,
+ options=dict(
+ maxReceivesPerSecond=dict(type='int', required=True),
+ ),
+ ),
+ )
+ delivery_args = dict(
+ http=dict(type='dict', required=False, options=http_delivery_args),
+ )
+
+ argument_spec = dict(
+ name=dict(required=True),
+ topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
+ state=dict(default='present', choices=['present', 'absent']),
+ display_name=dict(),
+ policy=dict(type='dict'),
+ delivery_policy=dict(type='dict', options=delivery_args),
+ subscriptions=dict(default=[], type='list', elements='dict'),
+ purge_subscriptions=dict(type='bool', default=True),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ content_based_deduplication=dict(choices=['enabled', 'disabled'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ name = module.params.get('name')
+ topic_type = module.params.get('topic_type')
+ state = module.params.get('state')
+ display_name = module.params.get('display_name')
+ policy = module.params.get('policy')
+ delivery_policy = module.params.get('delivery_policy')
+ subscriptions = module.params.get('subscriptions')
+ purge_subscriptions = module.params.get('purge_subscriptions')
+ content_based_deduplication = module.params.get('content_based_deduplication')
+ check_mode = module.check_mode
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
+ sns_topic = SnsTopicManager(module,
+ name,
+ topic_type,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ tags,
+ purge_tags,
+ content_based_deduplication,
+ check_mode)
+
+ if state == 'present':
+ changed = sns_topic.ensure_ok()
+ elif state == 'absent':
+ changed = sns_topic.ensure_gone()
+
+ sns_facts = dict(changed=changed,
+ sns_arn=sns_topic.topic_arn,
+ sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn))
+
+ module.exit_json(**sns_facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic_info.py b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py
new file mode 100644
index 000000000..ca6dd1aab
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: sns_topic_info
+short_description: Gather information about AWS SNS topics
+version_added: 3.2.0
+description:
+- The M(community.aws.sns_topic_info) module retrieves all AWS SNS topics, or the properties of a specific AWS SNS topic.
+author:
+- "Alina Buzachis (@alinabuzachis)"
+options:
+ topic_arn:
+ description: The ARN of the AWS SNS topic for which you wish to find subscriptions or list attributes.
+ required: false
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: List all the topics
+ community.aws.sns_topic_info:
+ register: sns_topic_list
+
+- name: Get info on a specific topic
+ community.aws.sns_topic_info:
+ topic_arn: "{{ sns_arn }}"
+ register: sns_topic_info
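+
+# A minimal sketch of consuming the registered result; the variable name
+# "sns_topic_info" comes from the task above and the path follows the
+# documented return values of this module.
+- name: Show the display name of the topic
+  ansible.builtin.debug:
+    msg: "{{ sns_topic_info.result.sns_topic.display_name }}"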
+'''
+
+RETURN = r'''
+result:
+ description:
+  - The result containing the details of one or all AWS SNS topics.
+ returned: success
+ type: list
+ contains:
+ sns_arn:
+ description: The ARN of the topic.
+ type: str
+ returned: always
+ sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name"
+ sns_topic:
+ description: Dict of sns topic details.
+ type: complex
+ returned: always
+ contains:
+ content_based_deduplication:
+          description: Whether or not content_based_deduplication was set.
+ returned: always
+ type: str
+ sample: "true"
+ delivery_policy:
+ description: Delivery policy for the SNS topic.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
+ "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
+ display_name:
+ description: Display name for SNS topic.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: My topic name
+ owner:
+ description: AWS account that owns the topic.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '123456789012'
+ policy:
+ description: Policy for the SNS topic.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: >
+ {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},
+ "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
+ subscriptions:
+ description: List of subscribers to the topic in this AWS account.
+ returned: always
+ type: list
+ sample: []
+ subscriptions_added:
+ description: List of subscribers added in this run.
+ returned: always
+ type: list
+ sample: []
+ subscriptions_confirmed:
+ description: Count of confirmed subscriptions.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_deleted:
+ description: Count of deleted subscriptions.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_existing:
+ description: List of existing subscriptions.
+ returned: always
+ type: list
+ sample: []
+ subscriptions_new:
+ description: List of new subscriptions.
+ returned: always
+ type: list
+ sample: []
+ subscriptions_pending:
+ description: Count of pending subscriptions.
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: '0'
+ subscriptions_purge:
+ description: Whether or not purge_subscriptions was set.
+ returned: always
+ type: bool
+ sample: true
+ topic_arn:
+ description: ARN of the SNS topic (equivalent to sns_arn).
+ returned: when topic is owned by this AWS account
+ type: str
+ sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic
+ topic_type:
+ description: The type of topic.
+ type: str
+ sample: "standard"
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
+from ansible_collections.community.aws.plugins.module_utils.sns import get_info
+
+
+def main():
+ argument_spec = dict(
+ topic_arn=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ topic_arn = module.params.get('topic_arn')
+
+ try:
+ connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+ if topic_arn:
+ results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn))
+ else:
+ results = list_topics(connection, module)
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sqs_queue.py b/ansible_collections/community/aws/plugins/modules/sqs_queue.py
new file mode 100644
index 000000000..211e64b26
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sqs_queue.py
@@ -0,0 +1,525 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sqs_queue
+version_added: 1.0.0
+short_description: Creates or deletes AWS SQS queues
+description:
+ - Create or delete AWS SQS queues.
+ - Update attributes on existing queues.
+author:
+ - Alan Loi (@loia)
+ - Fernando Jose Pando (@nand0p)
+ - Nadir Lloret (@nadirollo)
+ - Dennis Podkovyrin (@sbj-ss)
+options:
+ state:
+ description:
+ - Create or delete the queue.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the queue.
+ required: true
+ type: str
+ queue_type:
+ description:
+ - Standard or FIFO queue.
+ - I(queue_type) can only be set at queue creation and will otherwise be
+ ignored.
+ choices: ['standard', 'fifo']
+ default: 'standard'
+ type: str
+ deduplication_scope:
+ description:
+ - Deduplication scope for FIFO queues.
+ - C(messageGroup) is required for high throughput FIFO.
+ - Defaults to C(queue) on creation.
+ choices: ['queue', 'messageGroup']
+ type: str
+ version_added: 5.3.0
+ fifo_throughput_limit:
+ description:
+ - Throughput limit for FIFO queues.
+ - C(perMessageGroupId) is required for high throughput FIFO.
+ - Defaults to C(perQueue) on creation.
+ choices: ['perQueue', 'perMessageGroupId']
+ type: str
+ version_added: 5.3.0
+ visibility_timeout:
+ description:
+ - The default visibility timeout in seconds.
+ aliases: [default_visibility_timeout]
+ type: int
+ message_retention_period:
+ description:
+ - The message retention period in seconds.
+ type: int
+ maximum_message_size:
+ description:
+ - The maximum message size in bytes.
+ type: int
+ delay_seconds:
+ description:
+ - The delivery delay in seconds.
+ aliases: [delivery_delay]
+ type: int
+ receive_message_wait_time_seconds:
+ description:
+ - The receive message wait time in seconds.
+ aliases: [receive_message_wait_time]
+ type: int
+ policy:
+ description:
+ - Policy to attach to the queue.
+ - Policy body can be YAML or JSON.
+      - This is required for certain use cases, for example with S3 bucket notifications.
+ type: dict
+ redrive_policy:
+ description:
+ - JSON dict with the redrive_policy (see example).
+ type: dict
+ kms_master_key_id:
+ description:
+ - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+ - Specifying a valid I(kms_master_key_id) will enable encryption automatically.
+ type: str
+ kms_data_key_reuse_period_seconds:
+ description:
+ - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+ aliases: [kms_data_key_reuse_period]
+ type: int
+ content_based_deduplication:
+ type: bool
+ description:
+ - Enables content-based deduplication. Used for FIFOs only.
+ - Defaults to C(false).
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+'''
+
+RETURN = r'''
+content_based_deduplication:
+ description: Enables content-based deduplication. Used for FIFOs only.
+ type: bool
+ returned: always
+ sample: True
+fifo_throughput_limit:
+ description: Which throughput limit strategy is applied.
+ type: str
+ returned: always
+ sample: perQueue
+deduplication_scope:
+ description: The deduplication setting.
+ type: str
+ returned: always
+ sample: messageGroup
+visibility_timeout:
+ description: The default visibility timeout in seconds.
+ type: int
+ returned: always
+ sample: 30
+delay_seconds:
+ description: The delivery delay in seconds.
+ type: int
+ returned: always
+ sample: 0
+kms_master_key_id:
+ description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
+ type: str
+ returned: if value exists
+ sample: alias/MyAlias
+kms_data_key_reuse_period_seconds:
+ description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
+ type: int
+ returned: always
+ sample: 300
+maximum_message_size:
+ description: The maximum message size in bytes.
+ type: int
+ returned: always
+ sample: 262144
+message_retention_period:
+ description: The message retention period in seconds.
+ type: int
+ returned: always
+ sample: 345600
+name:
+  description: Name of the SQS queue.
+ type: str
+ returned: always
+ sample: "queuename-987d2de0"
+queue_arn:
+ description: The queue's Amazon resource name (ARN).
+ type: str
+ returned: on success
+ sample: 'arn:aws:sqs:us-east-1:123456789012:queuename-987d2de0'
+queue_url:
+  description: URL to access the queue.
+ type: str
+ returned: on success
+ sample: 'https://queue.amazonaws.com/123456789012/MyQueue'
+receive_message_wait_time_seconds:
+ description: The receive message wait time in seconds.
+ type: int
+ returned: always
+ sample: 0
+region:
+  description: Region that the queue was created within.
+ type: str
+ returned: always
+ sample: 'us-east-1'
+tags:
+  description: Dictionary of queue tags.
+ type: dict
+ returned: always
+ sample: '{"Env": "prod"}'
+'''
+
+EXAMPLES = r'''
+- name: Create SQS queue with redrive policy
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ default_visibility_timeout: 120
+ message_retention_period: 86400
+ maximum_message_size: 1024
+ delivery_delay: 30
+ receive_message_wait_time: 20
+ policy: "{{ json_dict }}"
+ redrive_policy:
+ maxReceiveCount: 5
+ deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
+
+- name: Drop redrive policy
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ redrive_policy: {}
+
+- name: Create FIFO queue
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ queue_type: fifo
+ content_based_deduplication: true
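+
+# A sketch of a high throughput FIFO queue: per the option documentation,
+# this requires deduplication_scope=messageGroup together with
+# fifo_throughput_limit=perMessageGroupId. The queue name is illustrative.
+- name: Create high throughput FIFO queue
+  community.aws.sqs_queue:
+    name: hifi-queue
+    region: ap-southeast-2
+    queue_type: fifo
+    deduplication_scope: messageGroup
+    fifo_throughput_limit: perMessageGroupId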
+
+- name: Tag queue
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ tags:
+ example: SomeValue
+
+- name: Configure Encryption, automatically uses a new data key every hour
+ community.aws.sqs_queue:
+ name: fifo-queue
+ region: ap-southeast-2
+ kms_master_key_id: alias/MyQueueKey
+ kms_data_key_reuse_period_seconds: 3600
+
+- name: Example queue allowing s3 bucket notifications
+  community.aws.sqs_queue:
+ name: "S3Notifications"
+ default_visibility_timeout: 120
+ message_retention_period: 86400
+ maximum_message_size: 1024
+ delivery_delay: 30
+ receive_message_wait_time: 20
+ policy:
+ Version: 2012-10-17
+ Id: s3-queue-policy
+ Statement:
+ - Sid: allowNotifications
+ Effect: Allow
+ Principal:
+ Service: s3.amazonaws.com
+ Action:
+ - SQS:SendMessage
+ Resource: "arn:aws:sqs:*:*:S3Notifications"
+ Condition:
+ ArnLike:
+ aws:SourceArn: "arn:aws:s3:*:*:SomeBucket"
+
+- name: Delete SQS queue
+ community.aws.sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ state: absent
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+def get_queue_name(module, is_fifo=False):
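+    # FIFO queue names must end in ".fifo"; append the suffix if the caller
+    # hasn't already included it.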
+ name = module.params.get('name')
+ if not is_fifo or name.endswith('.fifo'):
+ return name
+ return name + '.fifo'
+
+
+# NonExistentQueue is explicitly expected when a queue doesn't exist
+@AWSRetry.jittered_backoff()
+def get_queue_url(client, name):
+ try:
+ return client.get_queue_url(QueueName=name)['QueueUrl']
+ except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'):
+ return None
+
+
+def describe_queue(client, queue_url):
+ """
+    Describe a queue, returning its attributes with snake_case keys.
+ """
+ attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ description = dict(attributes)
+ description.pop('Policy', None)
+ description.pop('RedrivePolicy', None)
+ description = camel_dict_to_snake_dict(description)
+ description['policy'] = attributes.get('Policy', None)
+ description['redrive_policy'] = attributes.get('RedrivePolicy', None)
+
+ # Boto3 returns everything as a string, convert them back to integers/dicts if
+ # that's what we expected.
+ for key, value in description.items():
+ if value is None:
+ continue
+
+ if key in ['policy', 'redrive_policy']:
+ policy = json.loads(value)
+ description[key] = policy
+ continue
+
+        if key == 'content_based_deduplication':
+            # The attribute is returned as the string "true"/"false"; bool()
+            # on any non-empty string is True, so compare the string instead.
+            description[key] = (str(value).lower() == 'true')
+
+ try:
+ if value == str(int(value)):
+ description[key] = int(value)
+ except (TypeError, ValueError):
+ pass
+
+ return description
+
+
+def create_or_update_sqs_queue(client, module):
+ is_fifo = (module.params.get('queue_type') == 'fifo')
+ kms_master_key_id = module.params.get('kms_master_key_id')
+ queue_name = get_queue_name(module, is_fifo)
+ result = dict(
+ name=queue_name,
+ region=module.params.get('region'),
+ changed=False,
+ )
+
+ queue_url = get_queue_url(client, queue_name)
+ result['queue_url'] = queue_url
+
+ # Create a dict() to hold attributes that will be passed to boto3
+ create_attributes = {}
+
+ if not queue_url:
+ if is_fifo:
+ create_attributes['FifoQueue'] = "True"
+ if kms_master_key_id:
+ create_attributes['KmsMasterKeyId'] = kms_master_key_id
+ result['changed'] = True
+ if module.check_mode:
+ return result
+ queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
+
+ changed, arn = update_sqs_queue(module, client, queue_url)
+ result['changed'] |= changed
+ result['queue_arn'] = arn
+
+ changed, tags = update_tags(client, queue_url, module)
+ result['changed'] |= changed
+ result['tags'] = tags
+
+ result.update(describe_queue(client, queue_url))
+
+    COMPATIBILITY_KEYS = dict(
+ delay_seconds='delivery_delay',
+ receive_message_wait_time_seconds='receive_message_wait_time',
+ visibility_timeout='default_visibility_timeout',
+ kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
+ )
+ for key in list(result.keys()):
+
+ # The return values changed between boto and boto3, add the old keys too
+ # for backwards compatibility
+        return_name = COMPATIBILITY_KEYS.get(key)
+ if return_name:
+ result[return_name] = result.get(key)
+
+ return result
+
+
+def update_sqs_queue(module, client, queue_url):
+ check_mode = module.check_mode
+ changed = False
+ existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
+ attributes_to_set = dict()
+
+ # Boto3 SQS deals with policies as strings, we want to deal with them as
+ # dicts
+ if module.params.get('policy') is not None:
+ policy = module.params.get('policy')
+ current_value = existing_attributes.get('Policy', '{}')
+ current_policy = json.loads(current_value)
+ if compare_policies(current_policy, policy):
+ attributes_to_set['Policy'] = json.dumps(policy)
+ changed = True
+ if module.params.get('redrive_policy') is not None:
+ policy = module.params.get('redrive_policy')
+ current_value = existing_attributes.get('RedrivePolicy', '{}')
+ current_policy = json.loads(current_value)
+ if compare_policies(current_policy, policy):
+ attributes_to_set['RedrivePolicy'] = json.dumps(policy)
+ changed = True
+
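+    # SQS reports every attribute as a string, so normalize booleans to
+    # "true"/"false" and compare the string forms before deciding to update.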
+ for attribute, value in existing_attributes.items():
+ # We handle these as a special case because they're IAM policies
+ if attribute in ['Policy', 'RedrivePolicy']:
+ continue
+
+ if attribute not in new_attributes.keys():
+ continue
+
+ if new_attributes.get(attribute) is None:
+ continue
+
+ new_value = new_attributes[attribute]
+
+ if isinstance(new_value, bool):
+ new_value = str(new_value).lower()
+ value = str(value).lower()
+
+ if str(new_value) == str(value):
+ continue
+
+ # Boto3 expects strings
+ attributes_to_set[attribute] = str(new_value)
+ changed = True
+
+ if changed and not check_mode:
+ client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
+
+    # Attribute keys returned by get_queue_attributes are CamelCase
+    return changed, existing_attributes.get('QueueArn')
+
+
+def delete_sqs_queue(client, module):
+ is_fifo = (module.params.get('queue_type') == 'fifo')
+ queue_name = get_queue_name(module, is_fifo)
+ result = dict(
+ name=queue_name,
+ region=module.params.get('region'),
+ changed=False
+ )
+
+ queue_url = get_queue_url(client, queue_name)
+ if not queue_url:
+ return result
+
+    result['changed'] = True
+ if not module.check_mode:
+ AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
+
+ return result
+
+
+def update_tags(client, queue_url, module):
+ new_tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ if new_tags is None:
+ return False, {}
+
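+    # list_queue_tags can omit the "Tags" key entirely when a queue has no
+    # tags, which is why KeyError is handled alongside the API errors below.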
+ try:
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError):
+ existing_tags = {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ if tags_to_remove:
+ client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+ client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add)
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
+ else:
+ existing_tags = new_tags
+
+ changed = bool(tags_to_remove) or bool(tags_to_add)
+ return changed, existing_tags
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
+ delay_seconds=dict(type='int', aliases=['delivery_delay']),
+ maximum_message_size=dict(type='int'),
+ message_retention_period=dict(type='int'),
+ policy=dict(type='dict'),
+ receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
+ redrive_policy=dict(type='dict'),
+ visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
+ kms_master_key_id=dict(type='str'),
+ fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]),
+ deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']),
+ kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False),
+ content_based_deduplication=dict(type='bool'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ state = module.params.get('state')
+ retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
+ try:
+ client = module.client('sqs', retry_decorator=retry_decorator)
+ if state == 'present':
+ result = create_or_update_sqs_queue(client, module)
+ elif state == 'absent':
+ result = delete_sqs_queue(client, module)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to control sqs queue')
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ssm_parameter.py b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py
new file mode 100644
index 000000000..c435305c2
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py
@@ -0,0 +1,597 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ssm_parameter
+version_added: 1.0.0
+short_description: Manage key-value pairs in AWS Systems Manager Parameter Store
+description:
+ - Manage key-value pairs in AWS Systems Manager (SSM) Parameter Store.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_ssm_parameter_store).
+ The usage did not change.
+options:
+ name:
+ description:
+ - Parameter key name.
+ required: true
+ type: str
+ description:
+ description:
+ - Parameter key description.
+ required: false
+ type: str
+ value:
+ description:
+ - Parameter value.
+ required: false
+ type: str
+ state:
+ description:
+      - Creates or modifies a parameter when set to C(present).
+      - Deletes a parameter when set to C(absent).
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ string_type:
+ description:
+ - Parameter String type.
+ required: false
+ choices: ['String', 'StringList', 'SecureString']
+ default: String
+ type: str
+ aliases: ['type']
+ decryption:
+ description:
+      - Work with SecureString type to get plain text secrets.
+ type: bool
+ required: false
+ default: true
+ key_id:
+ description:
+ - AWS KMS key to decrypt the secrets.
+ - The default key (C(alias/aws/ssm)) is automatically generated the first
+ time it's requested.
+ required: false
+ default: alias/aws/ssm
+ type: str
+ overwrite_value:
+ description:
+ - Option to overwrite an existing value if it already exists.
+ required: false
+ choices: ['never', 'changed', 'always']
+ default: changed
+ type: str
+ tier:
+ description:
+ - Parameter store tier type.
+ required: false
+ choices: ['Standard', 'Advanced', 'Intelligent-Tiering']
+ default: Standard
+ type: str
+ version_added: 1.5.0
+seealso:
+ - ref: amazon.aws.aws_ssm lookup <ansible_collections.amazon.aws.aws_ssm_lookup>
+ description: The documentation for the C(amazon.aws.aws_ssm) lookup plugin.
+
+author:
+ - "Davinder Pal (@116davinder) <dpsangwal@gmail.com>"
+ - "Nathan Webster (@nathanwebsterdotme)"
+ - "Bill Wang (@ozbillwang) <ozbillwang@gmail.com>"
+ - "Michael De La Rue (@mikedlr)"
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 5.3.0.
+
+'''
+
+EXAMPLES = '''
+- name: Create or update key/value pair in AWS SSM parameter store
+ community.aws.ssm_parameter:
+ name: "Hello"
+ description: "This is your first key"
+ value: "World"
+
+- name: Delete the key
+ community.aws.ssm_parameter:
+ name: "Hello"
+ state: absent
+
+- name: Create or update secure key/value pair with default KMS key (aws/ssm)
+ community.aws.ssm_parameter:
+ name: "Hello"
+ description: "This is your first key"
+ string_type: "SecureString"
+ value: "World"
+
+- name: Create or update secure key/value pair with nominated KMS key
+ community.aws.ssm_parameter:
+ name: "Hello"
+ description: "This is your first key"
+ string_type: "SecureString"
+ key_id: "alias/demo"
+ value: "World"
+
+- name: Always update a parameter store value and create a new version
+ community.aws.ssm_parameter:
+ name: "overwrite_example"
+ description: "This example will always overwrite the value"
+ string_type: "String"
+ value: "Test1234"
+ overwrite_value: "always"
+
+- name: Create or update key/value pair in AWS SSM parameter store with tier
+ community.aws.ssm_parameter:
+ name: "Hello"
+ description: "This is your first key"
+ value: "World"
+ tier: "Advanced"
+
+- name: Read the value back with the recommended aws_ssm lookup plugin
+ ansible.builtin.debug:
+ msg: "{{ lookup('amazon.aws.aws_ssm', 'Hello') }}"
+
+- name: Create or update key/value pair in AWS SSM parameter store w/ tags
+ community.aws.ssm_parameter:
+ name: "Hello"
+ description: "This is your first key"
+ value: "World"
+ tags:
+ Environment: "dev"
+ Version: "1.0"
+ Confidentiality: "low"
+ Tag With Space: "foo bar"
+
+- name: Add or update a tag on an existing parameter w/o removing existing tags
+ community.aws.ssm_parameter:
+ name: "Hello"
+ purge_tags: false
+ tags:
+ Contact: "person1"
+
+- name: Delete all tags on an existing parameter
+ community.aws.ssm_parameter:
+ name: "Hello"
+ tags: {}
+'''
+
+RETURN = '''
+parameter_metadata:
+ type: dict
+ description:
+ - Information about a parameter.
+ - Does not include the value of the parameter as this can be sensitive
+ information.
+ returned: success
+ contains:
+ data_type:
+ type: str
+ description: Parameter Data type.
+ example: text
+ returned: success
+ description:
+ type: str
+ description: Parameter key description.
+ example: This is your first key
+ returned: success
+ last_modified_date:
+ type: str
+ description: Time and date that the parameter was last modified.
+ example: '2022-06-20T09:56:58.573000+00:00'
+ returned: success
+ last_modified_user:
+ type: str
+ description: ARN of the last user to modify the parameter.
+ example: 'arn:aws:sts::123456789012:assumed-role/example-role/session=example'
+ returned: success
+ name:
+ type: str
+ description: Parameter key name.
+ example: Hello
+ returned: success
+ policies:
+ type: list
+ description: A list of policies associated with a parameter.
+ elements: dict
+ returned: success
+ contains:
+ policy_text:
+ type: str
+ description: The JSON text of the policy.
+ returned: success
+ policy_type:
+ type: str
+ description: The type of policy.
+ example: Expiration
+ returned: success
+ policy_status:
+ type: str
+ description: The status of the policy.
+ example: Pending
+ returned: success
+ tier:
+ type: str
+ description: Parameter tier.
+ example: Standard
+ returned: success
+ type:
+ type: str
+      description: Parameter type.
+ example: String
+ returned: success
+ version:
+ type: int
+      description: Parameter version number.
+ example: 3
+ returned: success
+ tags:
+ description: A dictionary representing the tags associated with the parameter.
+ type: dict
+ returned: when the parameter has tags
+ example: {'MyTagName': 'Some Value'}
+ version_added: 5.3.0
+'''
+
+import time
+
+try:
+ import botocore
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+class ParameterWaiterFactory(BaseWaiterFactory):
+ def __init__(self, module):
+ client = module.client('ssm')
+ super(ParameterWaiterFactory, self).__init__(module, client)
+
+ @property
+ def _waiter_model_data(self):
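+        # botocore doesn't ship waiters for SSM parameters, so extend the
+        # base waiter model with custom DescribeParameters-based acceptors.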
+ data = super(ParameterWaiterFactory, self)._waiter_model_data
+ ssm_data = dict(
+ parameter_exists=dict(
+ operation='DescribeParameters',
+ delay=1, maxAttempts=20,
+ acceptors=[
+ dict(state='retry', matcher='error', expected='ParameterNotFound'),
+ dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'),
+ dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'),
+ ]
+ ),
+ parameter_deleted=dict(
+ operation='DescribeParameters',
+ delay=1, maxAttempts=20,
+ acceptors=[
+ dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'),
+ dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'),
+ dict(state='success', matcher='error', expected='ParameterNotFound'),
+ ]
+ ),
+ )
+ data.update(ssm_data)
+ return data
+
+
+def _wait_exists(client, module, name):
+ if module.check_mode:
+ return
+ wf = ParameterWaiterFactory(module)
+ waiter = wf.get_waiter('parameter_exists')
+ try:
+ waiter.wait(
+ ParameterFilters=[{'Key': 'Name', "Values": [name]}],
+ )
+ except botocore.exceptions.WaiterError:
+ module.warn("Timeout waiting for parameter to exist")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe parameter while waiting for creation")
+
+
+def _wait_updated(client, module, name, version):
+ # Unfortunately we can't filter on the Version, as such we need something custom.
+ if module.check_mode:
+ return
+ for x in range(1, 10):
+ try:
+ parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}])
+ if parameter.get('Version', 0) > version:
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update")
+ time.sleep(1)
+
+
+def _wait_deleted(client, module, name):
+ if module.check_mode:
+ return
+ wf = ParameterWaiterFactory(module)
+ waiter = wf.get_waiter('parameter_deleted')
+ try:
+ waiter.wait(
+ ParameterFilters=[{'Key': 'Name', "Values": [name]}],
+ )
+ except botocore.exceptions.WaiterError:
+ module.warn("Timeout waiting for parameter to exist")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe parameter while waiting for deletion")
+
+
+def tag_parameter(client, module, parameter_name, tags):
+ try:
+ return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter',
+ ResourceId=parameter_name, Tags=tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to add tag(s) to parameter")
+
+
+def untag_parameter(client, module, parameter_name, tag_keys):
+ try:
+ return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter',
+ ResourceId=parameter_name, TagKeys=tag_keys)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter")
+
+
+def get_parameter_tags(client, module, parameter_name):
+ try:
+ tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter',
+ ResourceId=parameter_name)['TagList']
+ tags_dict = boto3_tag_list_to_ansible_dict(tags)
+ return tags_dict
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to retrieve parameter tags")
+
+
+def update_parameter_tags(client, module, parameter_name, supplied_tags):
+ changed = False
+ response = {}
+
+ if supplied_tags is None:
+ return False, response
+
+ current_tags = get_parameter_tags(client, module, parameter_name)
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags,
+ module.params.get('purge_tags'))
+
+ if tags_to_add:
+ if module.check_mode:
+ return True, response
+ response = tag_parameter(client, module, parameter_name,
+ ansible_dict_to_boto3_tag_list(tags_to_add))
+ changed = True
+ if tags_to_remove:
+ if module.check_mode:
+ return True, response
+ response = untag_parameter(client, module, parameter_name, tags_to_remove)
+ changed = True
+
+ return changed, response
+
+
+def update_parameter(client, module, **args):
+ changed = False
+ response = {}
+ if module.check_mode:
+ return True, response
+
+ try:
+ response = client.put_parameter(aws_retry=True, **args)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as exc:
+ module.fail_json_aws(exc, msg="setting parameter")
+
+ return changed, response
+
+
+@AWSRetry.jittered_backoff()
+def describe_parameter(client, module, **args):
+ paginator = client.get_paginator('describe_parameters')
+ existing_parameter = paginator.paginate(**args).build_full_result()
+
+ if not existing_parameter['Parameters']:
+ return None
+
+ tags_dict = get_parameter_tags(client, module, module.params.get('name'))
+ existing_parameter['Parameters'][0]['tags'] = tags_dict
+
+ return existing_parameter['Parameters'][0]
+
+
+def create_update_parameter(client, module):
+ changed = False
+ existing_parameter = None
+ response = {}
+
+ args = dict(
+ Name=module.params.get('name'),
+ Type=module.params.get('string_type'),
+ Tier=module.params.get('tier')
+ )
+
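+    # Map overwrite_value onto put_parameter's Overwrite flag: only "always"
+    # and "changed" may replace an existing value; with "never" a
+    # pre-existing parameter is left untouched.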
+ if (module.params.get('overwrite_value') in ("always", "changed")):
+ args.update(Overwrite=True)
+ else:
+ args.update(Overwrite=False)
+
+ if module.params.get('value') is not None:
+ args.update(Value=module.params.get('value'))
+
+ if module.params.get('description'):
+ args.update(Description=module.params.get('description'))
+
+ if module.params.get('string_type') == 'SecureString':
+ args.update(KeyId=module.params.get('key_id'))
+
+ try:
+ existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True)
+ except botocore.exceptions.ClientError:
+ pass
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="fetching parameter")
+
+ if existing_parameter:
+ original_version = existing_parameter['Parameter']['Version']
+ if 'Value' not in args:
+ args['Value'] = existing_parameter['Parameter']['Value']
+
+ if (module.params.get('overwrite_value') == 'always'):
+ (changed, response) = update_parameter(client, module, **args)
+
+ elif (module.params.get('overwrite_value') == 'changed'):
+ if existing_parameter['Parameter']['Type'] != args['Type']:
+ (changed, response) = update_parameter(client, module, **args)
+
+ elif existing_parameter['Parameter']['Value'] != args['Value']:
+ (changed, response) = update_parameter(client, module, **args)
+
+ elif args.get('Description'):
+ # Description field not available from get_parameter function so get it from describe_parameters
+ try:
+ describe_existing_parameter = describe_parameter(
+ client, module,
+ ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="getting description value")
+
+ if describe_existing_parameter.get('Description') != args['Description']:
+ (changed, response) = update_parameter(client, module, **args)
+ if changed:
+ _wait_updated(client, module, module.params.get('name'), original_version)
+
+ # Handle tag updates for existing parameters
+ if module.params.get('overwrite_value') != 'never':
+ tags_changed, tags_response = update_parameter_tags(
+ client, module, existing_parameter['Parameter']['Name'],
+ module.params.get('tags'))
+
+ changed = changed or tags_changed
+
+ if tags_response:
+ response['tag_updates'] = tags_response
+
+ else:
+ # Add tags in initial creation request
+ if module.params.get('tags'):
+ args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags')))
+ # Overwrite=True conflicts with tags and is not needed for new param
+ args.update(Overwrite=False)
+
+ (changed, response) = update_parameter(client, module, **args)
+ _wait_exists(client, module, module.params.get('name'))
+
+ return changed, response
+
+
+def delete_parameter(client, module):
+ response = {}
+
+ try:
+ existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True)
+ except is_boto3_error_code('ParameterNotFound'):
+ return False, {}
+ except botocore.exceptions.ClientError:
+ # If we can't describe the parameter we may still be able to delete it
+ existing_parameter = True
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="setting parameter")
+
+ if not existing_parameter:
+ return False, {}
+ if module.check_mode:
+ return True, {}
+
+ try:
+ response = client.delete_parameter(
+ aws_retry=True,
+ Name=module.params.get('name')
+ )
+ except is_boto3_error_code('ParameterNotFound'):
+ return False, {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="deleting parameter")
+
+ _wait_deleted(client, module, module.params.get('name'))
+
+ return True, response
+
+
+def setup_client(module):
+ retry_decorator = AWSRetry.jittered_backoff()
+ connection = module.client('ssm', retry_decorator=retry_decorator)
+ return connection
+
+
+def setup_module_object():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ value=dict(required=False, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']),
+ decryption=dict(default=True, type='bool'),
+ key_id=dict(default="alias/aws/ssm"),
+ overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
+ tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+
+ return AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+
+def main():
+ module = setup_module_object()
+ state = module.params.get('state')
+ client = setup_client(module)
+
+ invocations = {
+ "present": create_update_parameter,
+ "absent": delete_parameter,
+ }
+ (changed, response) = invocations[state](client, module)
+
+ result = {"response": response}
+
+ try:
+ parameter_metadata = describe_parameter(
+ client, module,
+ ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}])
+    except is_boto3_error_code('ParameterNotFound'):
+        # main() must exit via exit_json(); treat a missing parameter as
+        # "no metadata" instead of returning a (changed, response) tuple.
+        parameter_metadata = None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="to describe parameter")
+ if parameter_metadata:
+ result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata,
+ ignore_list=['tags'])
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py
new file mode 100644
index 000000000..c141610bb
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: stepfunctions_state_machine
+version_added: 1.0.0
+short_description: Manage AWS Step Functions state machines
+description:
+ - Create, update and delete state machines in AWS Step Functions.
+ - Calling the module in C(state=present) for an existing AWS Step Functions state machine
+ will attempt to update the state machine definition, IAM Role, or tags with the provided data.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine).
+ The usage did not change.
+
+options:
+ name:
+ description:
+ - Name of the state machine.
+ required: true
+ type: str
+ definition:
+ description:
+ - The Amazon States Language definition of the state machine. See
+ U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
+ information on the Amazon States Language.
+ - Required when I(state=present).
+ type: json
+ role_arn:
+ description:
+ - The ARN of the IAM Role that will be used by the state machine for its executions.
+ - Required when I(state=present).
+ type: str
+ state:
+ description:
+ - Desired state for the state machine.
+ default: present
+ choices: [ present, absent ]
+ type: str
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+author:
+ - Tom De Keyser (@tdekeyser)
+'''
+
+EXAMPLES = '''
+# Create a new AWS Step Functions state machine
+- name: Setup HelloWorld state machine
+ community.aws.stepfunctions_state_machine:
+ name: "HelloWorldStateMachine"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::123456789012:role/service-role/invokeLambdaStepFunctionsRole
+ tags:
+ project: helloWorld
+
+# Update an existing state machine
+- name: Change IAM Role and tags of HelloWorld state machine
+ community.aws.stepfunctions_state_machine:
+ name: HelloWorldStateMachine
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::123456789012:role/service-role/anotherStepFunctionsRole
+ tags:
+ otherTag: aDifferentTag
+
+# Remove the AWS Step Functions state machine
+- name: Delete HelloWorld state machine
+ community.aws.stepfunctions_state_machine:
+ name: HelloWorldStateMachine
+ state: absent
+'''
+
+RETURN = '''
+state_machine_arn:
+  description: ARN of the AWS Step Functions state machine.
+ type: str
+ returned: always
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
+ AWSRetry,
+ compare_aws_tags,
+ boto3_tag_list_to_ansible_dict,
+ )
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def manage_state_machine(state, sfn_client, module):
+ state_machine_arn = get_state_machine_arn(sfn_client, module)
+
+ if state == 'present':
+ if state_machine_arn is None:
+ create(sfn_client, module)
+ else:
+ update(state_machine_arn, sfn_client, module)
+ elif state == 'absent':
+ if state_machine_arn is not None:
+ remove(state_machine_arn, sfn_client, module)
+
+ check_mode(module, msg='State is up-to-date.')
+ module.exit_json(changed=False, state_machine_arn=state_machine_arn)
+
+
+def create(sfn_client, module):
+ check_mode(module, msg='State machine would be created.', changed=True)
+
+ tags = module.params.get('tags')
+ sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
+
+ state_machine = sfn_client.create_state_machine(
+ name=module.params.get('name'),
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn'),
+ tags=sfn_tags
+ )
+ module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
+
+
+def remove(state_machine_arn, sfn_client, module):
+ check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def update(state_machine_arn, sfn_client, module):
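+    # If the definition, role or tags differ from what is deployed, push all
+    # of the updates and report changed; otherwise fall through so the caller
+    # reports an unchanged state machine.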
+ tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
+
+ if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
+ check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.update_state_machine(
+ stateMachineArn=state_machine_arn,
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn')
+ )
+ sfn_client.untag_resource(
+ resourceArn=state_machine_arn,
+ tagKeys=tags_to_remove
+ )
+ sfn_client.tag_resource(
+ resourceArn=state_machine_arn,
+ tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
+ )
+
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def compare_tags(state_machine_arn, sfn_client, module):
+ new_tags = module.params.get('tags')
+ current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
+ return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
+
+
+def params_changed(state_machine_arn, sfn_client, module):
+ """
+ Check whether the state machine definition or IAM Role ARN is different
+ from the existing state machine parameters.
+ """
+ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
+ return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
+
+
+def get_state_machine_arn(sfn_client, module):
+ """
+ Finds the state machine ARN based on the name parameter. Returns None if
+ there is no state machine with this name.
+ """
+ target_name = module.params.get('name')
+ all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
+
+ for state_machine in all_state_machines:
+ if state_machine.get('name') == target_name:
+ return state_machine.get('stateMachineArn')
+
+
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ definition=dict(type='json'),
+ role_arn=dict(type='str'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ tags=dict(default=None, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
+ state = module.params.get('state')
+
+ try:
+ manage_state_machine(state, sfn_client, module)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to manage state machine')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py
new file mode 100644
index 000000000..aacfa987f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Prasad Katti (@prasadkatti)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: stepfunctions_state_machine_execution
+version_added: 1.0.0
+
+short_description: Start or stop execution of an AWS Step Functions state machine
+
+description:
+ - Start or stop execution of a state machine in AWS Step Functions.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine_execution).
+ The usage did not change.
+
+options:
+ action:
+ description: Desired action (C(start) or C(stop)) for a state machine execution.
+ default: start
+ choices: [ start, stop ]
+ type: str
+ name:
+ description: Name of the execution.
+ type: str
+ execution_input:
+ description: The JSON input data for the execution.
+ type: json
+ default: {}
+ state_machine_arn:
+ description: The ARN of the state machine that will be executed.
+ type: str
+ execution_arn:
+ description: The ARN of the execution you wish to stop.
+ type: str
+ cause:
+ description: A detailed explanation of the cause for stopping the execution.
+ type: str
+ default: ''
+ error:
+ description: The error code of the failure to pass in when stopping the execution.
+ type: str
+ default: ''
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+author:
+ - Prasad Katti (@prasadkatti)
+'''
+
+EXAMPLES = '''
+- name: Start an execution of a state machine
+ community.aws.stepfunctions_state_machine_execution:
+ name: an_execution_name
+ execution_input: '{ "IsHelloWorldExample": true }'
+ state_machine_arn: "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine"
+
+- name: Stop an execution of a state machine
+ community.aws.stepfunctions_state_machine_execution:
+ action: stop
+ execution_arn: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+ cause: "cause of task failure"
+ error: "error code of the failure"
+'''
+
+RETURN = '''
+execution_arn:
+ description: ARN of the AWS Step Functions state machine execution.
+ type: str
+ returned: if action == start and changed == True
+ sample: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+start_date:
+ description: The date the execution is started.
+ type: str
+ returned: if action == start and changed == True
+ sample: "2019-11-02T22:39:49.071000-07:00"
+stop_date:
+ description: The date the execution is stopped.
+ type: str
+ returned: if action == stop
+ sample: "2019-11-02T22:39:49.071000-07:00"
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def start_execution(module, sfn_client):
+ '''
+ start_execution uses the execution name to determine whether a previous execution
+ already exists. If an execution with the provided name exists, client.start_execution
+ will not be called.
+ '''
+
+ state_machine_arn = module.params.get('state_machine_arn')
+ name = module.params.get('name')
+ execution_input = module.params.get('execution_input')
+
+ try:
+ # list_executions is eventually consistent
+ page_iterator = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
+
+ for execution in page_iterator.build_full_result()['executions']:
+ if name == execution['name']:
+ check_mode(module, msg='State machine execution already exists.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be started.', changed=True)
+ res_execution = sfn_client.start_execution(
+ stateMachineArn=state_machine_arn,
+ name=name,
+ input=execution_input
+ )
+ except is_boto3_error_code('ExecutionAlreadyExists'):
+ # unreachable in practice: the list_executions check above already exits for existing executions
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to start execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
+
+
+def stop_execution(module, sfn_client):
+
+ cause = module.params.get('cause')
+ error = module.params.get('error')
+ execution_arn = module.params.get('execution_arn')
+
+ try:
+ # describe_execution is eventually consistent
+ execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
+ if execution_status != 'RUNNING':
+ check_mode(module, msg='State machine execution is not running.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be stopped.', changed=True)
+ res = sfn_client.stop_execution(
+ executionArn=execution_arn,
+ cause=cause,
+ error=error
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to stop execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
+
+
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ action=dict(choices=['start', 'stop'], default='start'),
+ name=dict(type='str'),
+ execution_input=dict(type='json', default={}),
+ state_machine_arn=dict(type='str'),
+ cause=dict(type='str', default=''),
+ error=dict(type='str', default=''),
+ execution_arn=dict(type='str')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('action', 'start', ['name', 'state_machine_arn']),
+ ('action', 'stop', ['execution_arn']),
+ ],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions')
+
+ action = module.params.get('action')
+ if action == "start":
+ start_execution(module, sfn_client)
+ else:
+ stop_execution(module, sfn_client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/storagegateway_info.py b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py
new file mode 100644
index 000000000..3f3c3ae2f
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module is sponsored by E.T.A.I. (www.etai.fr)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: storagegateway_info
+version_added: 1.0.0
+short_description: Fetch AWS Storage Gateway information
+description:
+ - Fetch AWS Storage Gateway information
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_sgw_info).
+ The usage did not change.
+author:
+ - Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+options:
+ gather_local_disks:
+ description:
+ - Gather local disks attached to the storage gateway.
+ type: bool
+ required: false
+ default: true
+ gather_tapes:
+ description:
+ - Gather tape information for storage gateways in tape mode.
+ type: bool
+ required: false
+ default: true
+ gather_file_shares:
+ description:
+ - Gather file share information for storage gateways in s3 mode.
+ type: bool
+ required: false
+ default: true
+ gather_volumes:
+ description:
+ - Gather volume information for storage gateways in iSCSI (cached & stored) modes.
+ type: bool
+ required: false
+ default: true
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+RETURN = '''
+gateways:
+ description: List of gateway objects.
+ returned: always
+ type: complex
+ contains:
+ gateway_arn:
+ description: "Storage Gateway ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:123456789012:gateway/sgw-9999F888"
+ gateway_id:
+ description: "Storage Gateway ID"
+ returned: always
+ type: str
+ sample: "sgw-9999F888"
+ gateway_name:
+ description: "Storage Gateway friendly name"
+ returned: always
+ type: str
+ sample: "my-sgw-01"
+ gateway_operational_state:
+ description: "Storage Gateway operational state"
+ returned: always
+ type: str
+ sample: "ACTIVE"
+ gateway_type:
+ description: "Storage Gateway type"
+ returned: always
+ type: str
+ sample: "FILE_S3"
+ file_shares:
+ description: "Storage gateway file shares"
+ returned: when gateway_type == "FILE_S3"
+ type: complex
+ contains:
+ file_share_arn:
+ description: "File share ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:123456789012:share/share-AF999C88"
+ file_share_id:
+ description: "File share ID"
+ returned: always
+ type: str
+ sample: "share-AF999C88"
+ file_share_status:
+ description: "File share status"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ tapes:
+ description: "Storage Gateway tapes"
+ returned: when gateway_type == "VTL"
+ type: complex
+ contains:
+ tape_arn:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "arn:aws:storagegateway:eu-west-1:123456789012:tape/tape-AF999C88"
+ tape_barcode:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "tape-AF999C88"
+ tape_size_in_bytes:
+ description: "Tape ARN"
+ returned: always
+ type: int
+ sample: 555887569
+ tape_status:
+ description: "Tape ARN"
+ returned: always
+ type: str
+ sample: "AVAILABLE"
+ local_disks:
+ description: "Storage gateway local disks"
+ returned: always
+ type: complex
+ contains:
+ disk_allocation_type:
+ description: "Disk allocation type"
+ returned: always
+ type: str
+ sample: "CACHE STORAGE"
+ disk_id:
+ description: "Disk ID on the system"
+ returned: always
+ type: str
+ sample: "pci-0000:00:1f.0"
+ disk_node:
+ description: "Disk parent block device"
+ returned: always
+ type: str
+ sample: "/dev/sdb"
+ disk_path:
+ description: "Disk path used for the cache"
+ returned: always
+ type: str
+ sample: "/dev/nvme1n1"
+ disk_size_in_bytes:
+ description: "Disk size in bytes"
+ returned: always
+ type: int
+ sample: 107374182400
+ disk_status:
+ description: "Disk status"
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: "Get AWS storage gateway information"
+ community.aws.storagegateway_info:
+
+- name: "Get AWS storage gateway information for region eu-west-3"
+ community.aws.storagegateway_info:
+ region: eu-west-3
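+
+# A sketch of skipping the per-gateway detail calls and reusing the result
+# (the "sgw" variable name is only illustrative).
+- name: "Get gateway inventory only, without disks, tapes, shares or volumes"
+ community.aws.storagegateway_info:
+ gather_local_disks: false
+ gather_tapes: false
+ gather_file_shares: false
+ gather_volumes: false
+ register: sgw
+
+- name: "Show the gateway names"
+ ansible.builtin.debug:
+ msg: "{{ sgw.gateways | map(attribute='gateway_name') | list }}"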
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+class SGWInformationManager(object):
+ def __init__(self, client, module):
+ self.client = client
+ self.module = module
+
+ def fetch(self):
+ gateways = self.list_gateways()
+ for gateway in gateways:
+ if self.module.params.get('gather_local_disks'):
+ self.list_local_disks(gateway)
+ # File share gateway
+ if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
+ self.list_gateway_file_shares(gateway)
+ # Volume tape gateway
+ elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
+ self.list_gateway_vtl(gateway)
+ # iSCSI gateway
+ elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
+ self.list_gateway_volumes(gateway)
+
+ self.module.exit_json(gateways=gateways)
+
+ """
+ List all storage gateways for the AWS endpoint.
+ """
+ def list_gateways(self):
+ try:
+ paginator = self.client.get_paginator('list_gateways')
+ response = paginator.paginate(
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateways = []
+ for gw in response["Gateways"]:
+ gateways.append(camel_dict_to_snake_dict(gw))
+
+ return gateways
+
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
+
+ """
+ Read file share objects from AWS API response.
+ Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
+ """
+ @staticmethod
+ def _read_gateway_fileshare_response(fileshares, aws_reponse):
+ for share in aws_reponse["FileShareInfoList"]:
+ share_obj = camel_dict_to_snake_dict(share)
+ if "gateway_arn" in share_obj:
+ del share_obj["gateway_arn"]
+ fileshares.append(share_obj)
+
+ return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None
+
+ """
+ List file shares attached to AWS storage gateway when in S3 mode.
+ """
+ def list_gateway_file_shares(self, gateway):
+ try:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Limit=100
+ )
+
+ gateway["file_shares"] = []
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+
+ while marker is not None:
+ response = self.client.list_file_shares(
+ GatewayARN=gateway["gateway_arn"],
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
+
+ """
+ List storage gateway local disks
+ """
+ def list_local_disks(self, gateway):
+ try:
+ gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
+ self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
+
+ """
+ Read tape objects from AWS API response.
+ Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
+ """
+ @staticmethod
+ def _read_gateway_tape_response(tapes, aws_response):
+ for tape in aws_response["TapeInfos"]:
+ tape_obj = camel_dict_to_snake_dict(tape)
+ if "gateway_arn" in tape_obj:
+ del tape_obj["gateway_arn"]
+ tapes.append(tape_obj)
+
+ return aws_response["Marker"] if "Marker" in aws_response else None
+
+ """
+ List VTL & VTS attached to AWS storage gateway in VTL mode
+ """
+ def list_gateway_vtl(self, gateway):
+ try:
+ response = self.client.list_tapes(
+ Limit=100
+ )
+
+ gateway["tapes"] = []
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+
+ while marker is not None:
+ response = self.client.list_tapes(
+ Marker=marker,
+ Limit=100
+ )
+
+ marker = self._read_gateway_tape_response(gateway["tapes"], response)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
+
+ """
+ List volumes attached to AWS storage gateway in CACHED or STORAGE mode
+ """
+ def list_gateway_volumes(self, gateway):
+ try:
+ paginator = self.client.get_paginator('list_volumes')
+ response = paginator.paginate(
+ GatewayARN=gateway["gateway_arn"],
+ PaginationConfig={
+ 'PageSize': 100,
+ }
+ ).build_full_result()
+
+ gateway["volumes"] = []
+ for volume in response["VolumeInfos"]:
+ volume_obj = camel_dict_to_snake_dict(volume)
+ if "gateway_arn" in volume_obj:
+ del volume_obj["gateway_arn"]
+ if "gateway_id" in volume_obj:
+ del volume_obj["gateway_id"]
+
+ gateway["volumes"].append(volume_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
+
+
+def main():
+ argument_spec = dict(
+ gather_local_disks=dict(type='bool', default=True),
+ gather_tapes=dict(type='bool', default=True),
+ gather_file_shares=dict(type='bool', default=True),
+ gather_volumes=dict(type='bool', default=True)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = module.client('storagegateway')
+
+ if client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.')
+
+ SGWInformationManager(client, module).fetch()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sts_assume_role.py b/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
new file mode 100644
index 000000000..8e5a3b4fe
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+version_added: 1.0.0
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials.
+author:
+ - Boris Ekelchik (@bekelchik)
+ - Marek Piatek (@piontas)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is
+ assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
+ required: true
+ type: str
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail.
+ required: true
+ type: str
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ type: str
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
+ - The max depends on the IAM role's sessions duration setting.
+ - By default, the value is set to 3600 seconds.
+ type: int
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ type: str
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ type: str
+notes:
+ - In order to use the assumed role in a following playbook task you must pass the I(access_key), I(secret_key) and I(session_token) values from the registered result.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+RETURN = '''
+sts_creds:
+ description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
+ returned: always
+ type: dict
+ sample:
+ access_key: XXXXXXXXXXXXXXXXXXXX
+ expiration: '2017-11-11T11:11:11+00:00'
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+sts_user:
+ description: The Amazon Resource Name (ARN) and the assumed role ID
+ returned: always
+ type: dict
+ sample:
+ assumed_role_id: ARO123EXAMPLE123:Bob
+ arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+- community.aws.sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+- amazon.aws.ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
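+# A sketch of assuming a role whose trust policy requires MFA; the serial
+# number and token values are placeholders.
+- community.aws.sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someMfaProtectedRole"
+ role_session_name: "someMfaRoleSession"
+ mfa_serial_number: "arn:aws:iam::123456789012:mfa/example-user"
+ mfa_token: "123456"
+ register: assumed_mfa_role
+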
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def _parse_response(response):
+ credentials = response.get('Credentials', {})
+ user = response.get('AssumedRoleUser', {})
+
+ sts_cred = {
+ 'access_key': credentials.get('AccessKeyId'),
+ 'secret_key': credentials.get('SecretAccessKey'),
+ 'session_token': credentials.get('SessionToken'),
+ 'expiration': credentials.get('Expiration')
+
+ }
+ sts_user = camel_dict_to_snake_dict(user)
+ return sts_cred, sts_user
+
+
+def assume_role_policy(connection, module):
+ params = {
+ 'RoleArn': module.params.get('role_arn'),
+ 'RoleSessionName': module.params.get('role_session_name'),
+ 'Policy': module.params.get('policy'),
+ 'DurationSeconds': module.params.get('duration_seconds'),
+ 'ExternalId': module.params.get('external_id'),
+ 'SerialNumber': module.params.get('mfa_serial_number'),
+ 'TokenCode': module.params.get('mfa_token')
+ }
+ changed = False
+
+ kwargs = dict((k, v) for k, v in params.items() if v is not None)
+
+ try:
+ response = connection.assume_role(**kwargs)
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json_aws(e)
+
+ sts_cred, sts_user = _parse_response(response)
+ module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
+
+
+def main():
+ argument_spec = dict(
+ role_arn=dict(required=True),
+ role_session_name=dict(required=True),
+ duration_seconds=dict(required=False, default=None, type='int'),
+ external_id=dict(required=False, default=None),
+ policy=dict(required=False, default=None),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None, no_log=True)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ connection = module.client('sts')
+
+ assume_role_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/sts_session_token.py b/ansible_collections/community/aws/plugins/modules/sts_session_token.py
new file mode 100644
index 000000000..03df560e9
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/sts_session_token.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sts_session_token
+version_added: 1.0.0
+short_description: Obtain a session token from the AWS Security Token Service
+description:
+ - Obtain a session token from the AWS Security Token Service.
+author: Victor Costan (@pwnall)
+options:
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the session token.
+ See U(https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters)
+ for acceptable and default values.
+ type: int
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the user requires MFA.
+ type: str
+notes:
+ - In order to use the session token in a following playbook task you must pass the I(access_key), I(secret_key) and I(session_token) values from the registered result.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+RETURN = """
+sts_creds:
+ description: The Credentials object returned by the AWS Security Token Service
+ returned: always
+ type: dict
+ sample:
+ access_key: ASXXXXXXXXXXXXXXXXXX
+ expiration: "2016-04-08T11:59:47+00:00"
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+- name: Get a session token
+ community.aws.sts_session_token:
+ duration_seconds: 3600
+ register: session_credentials
+
+- name: Use the session token obtained above to tag an instance in account 123456789012
+ amazon.aws.ec2_tag:
+ aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+ aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+ security_token: "{{ session_credentials.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
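+# A sketch of requesting a session token with an MFA device; the serial
+# number and token values are placeholders.
+- name: Get a session token protected by MFA
+ community.aws.sts_session_token:
+ mfa_serial_number: "arn:aws:iam::123456789012:mfa/example-user"
+ mfa_token: "123456"
+ register: mfa_session_credentials
+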
+'''
+
+try:
+ import botocore
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def normalize_credentials(credentials):
+ access_key = credentials.get('AccessKeyId', None)
+ secret_key = credentials.get('SecretAccessKey', None)
+ session_token = credentials.get('SessionToken', None)
+ expiration = credentials.get('Expiration', None)
+ return {
+ 'access_key': access_key,
+ 'secret_key': secret_key,
+ 'session_token': session_token,
+ 'expiration': expiration
+ }
+
+
+def get_session_token(connection, module):
+ duration_seconds = module.params.get('duration_seconds')
+ mfa_serial_number = module.params.get('mfa_serial_number')
+ mfa_token = module.params.get('mfa_token')
+ changed = False
+
+ args = {}
+ if duration_seconds is not None:
+ args['DurationSeconds'] = duration_seconds
+ if mfa_serial_number is not None:
+ args['SerialNumber'] = mfa_serial_number
+ if mfa_token is not None:
+ args['TokenCode'] = mfa_token
+
+ try:
+ response = connection.get_session_token(**args)
+ changed = True
+ except ClientError as e:
+ module.fail_json_aws(e, msg='Failed to get session token')
+
+ credentials = normalize_credentials(response.get('Credentials', {}))
+ module.exit_json(changed=changed, sts_creds=credentials)
+
+
+def main():
+ argument_spec = dict(
+ duration_seconds=dict(required=False, default=None, type='int'),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None, no_log=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ try:
+ connection = module.client('sts')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ get_session_token(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/waf_condition.py b/ansible_collections/community/aws/plugins/modules/waf_condition.py
new file mode 100644
index 000000000..63585d50c
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/waf_condition.py
@@ -0,0 +1,742 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Will Thames
+# Copyright (c) 2015 Mike Mochan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: waf_condition
+short_description: Create and delete WAF Conditions
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for WAF
+ U(https://aws.amazon.com/documentation/waf/).
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_condition).
+ The usage did not change.
+
+author:
+ - Will Thames (@willthames)
+ - Mike Mochan (@mmochan)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+options:
+ name:
+ description: Name of the Web Application Firewall condition to manage.
+ required: true
+ type: str
+ type:
+ description: The type of matching to perform.
+ choices:
+ - byte
+ - geo
+ - ip
+ - regex
+ - size
+ - sql
+ - xss
+ type: str
+ required: true
+ filters:
+ description:
+ - A list of the filters against which to match.
+ - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string).
+ - For I(type=geo), the only valid key is I(country).
+ - For I(type=ip), the only valid key is I(ip_address).
+ - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern).
+ - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size).
+ - For I(type=sql), valid keys are I(field_to_match) and I(transformation).
+ - For I(type=xss), valid keys are I(field_to_match) and I(transformation).
+ - Required when I(state=present).
+ type: list
+ elements: dict
+ suboptions:
+ field_to_match:
+ description:
+ - The field upon which to perform the match.
+ - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
+ type: str
+ choices: ['uri', 'query_string', 'header', 'method', 'body']
+ position:
+ description:
+ - Where in the field the match needs to occur.
+ - Only valid when I(type=byte).
+ type: str
+ choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word']
+ header:
+ description:
+ - Which specific header should be matched.
+ - Required when I(field_to_match=header).
+ - Valid when I(type=byte).
+ type: str
+ transformation:
+ description:
+ - A transform to apply on the field prior to performing the match.
+ - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
+ type: str
+ choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode']
+ country:
+ description:
+ - Value of geo constraint (typically a two letter country code).
+ - The only valid key when I(type=geo).
+ type: str
+ ip_address:
+ description:
+ - An IP Address or CIDR to match.
+ - The only valid key when I(type=ip).
+ type: str
+ regex_pattern:
+ description:
+ - A dict describing the regular expressions used to perform the match.
+ - Only valid when I(type=regex).
+ type: dict
+ suboptions:
+ name:
+ description: A name to describe the set of patterns.
+ type: str
+ regex_strings:
+ description: A list of regular expressions to match.
+ type: list
+ elements: str
+ comparison:
+ description:
+ - What type of comparison to perform.
+ - Only valid key when I(type=size).
+ type: str
+ choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']
+ size:
+ description:
+ - The size of the field (in bytes).
+ - Only valid key when I(type=size).
+ type: int
+ target_string:
+ description:
+ - The string to search for.
+ - May be up to 50 bytes.
+ - Valid when I(type=byte).
+ type: str
+ purge_filters:
+ description:
+ - Whether to remove existing filters from a condition if not passed in I(filters).
+ default: false
+ type: bool
+ waf_regional:
+ description: Whether to use the C(waf-regional) module.
+ default: false
+ required: false
+ type: bool
+ state:
+ description: Whether the condition should be C(present) or C(absent).
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+'''
+
+EXAMPLES = r'''
+ - name: create WAF byte condition
+ community.aws.waf_condition:
+ name: my_byte_condition
+ filters:
+ - field_to_match: header
+ position: STARTS_WITH
+ target_string: Hello
+ header: Content-type
+ type: byte
+
+ - name: create WAF geo condition
+ community.aws.waf_condition:
+ name: my_geo_condition
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+
+ - name: create IP address condition
+ community.aws.waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+
+ - name: create WAF regex condition
+ community.aws.waf_condition:
+ name: my_regex_condition
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+
+ - name: create WAF size condition
+ community.aws.waf_condition:
+ name: my_size_condition
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+
+ - name: create WAF sql injection condition
+ community.aws.waf_condition:
+ name: my_sql_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+
+ - name: create WAF xss condition
+ community.aws.waf_condition:
+ name: my_xss_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
+
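+ # A sketch of removing a condition created above; name and type are both
+ # required when deleting.
+ - name: remove WAF xss condition
+ community.aws.waf_condition:
+ name: my_xss_condition
+ type: xss
+ state: absent
+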
+'''
+
+RETURN = r'''
+condition:
+ description: Condition returned by operation.
+ returned: always
+ type: complex
+ contains:
+ condition_id:
+ description: Type-agnostic ID for the condition.
+ returned: when state is present
+ type: str
+ sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
+ byte_match_set_id:
+ description: ID for byte match set.
+ returned: always
+ type: str
+ sample: c4882c96-837b-44a2-a762-4ea87dbf812b
+ byte_match_tuples:
+ description: List of byte match tuples.
+ returned: always
+ type: complex
+ contains:
+ field_to_match:
+ description: Field to match.
+ returned: always
+ type: complex
+ contains:
+ data:
+ description: Which specific header (if type is header).
+ type: str
+ sample: content-type
+ type:
+ description: Type of field
+ type: str
+ sample: HEADER
+ positional_constraint:
+ description: Position in the field to match.
+ type: str
+ sample: STARTS_WITH
+ target_string:
+ description: String to look for.
+ type: str
+ sample: Hello
+ text_transformation:
+ description: Transformation to apply to the field before matching.
+ type: str
+ sample: NONE
+ geo_match_constraints:
+ description: List of geographical constraints.
+ returned: when type is geo and state is present
+ type: complex
+ contains:
+ type:
+ description: Type of geo constraint.
+ type: str
+ sample: Country
+ value:
+ description: Value of geo constraint (typically a country code).
+ type: str
+ sample: AT
+ geo_match_set_id:
+ description: ID of the geo match set.
+ returned: when type is geo and state is present
+ type: str
+ sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
+ ip_set_descriptors:
+ description: List of IP address filters.
+ returned: when type is ip and state is present
+ type: complex
+ contains:
+ type:
+ description: Type of IP address (IPV4 or IPV6).
+ returned: always
+ type: str
+ sample: IPV4
+ value:
+ description: IP address.
+ returned: always
+ type: str
+ sample: 10.0.0.0/8
+ ip_set_id:
+ description: ID of condition.
+ returned: when type is ip and state is present
+ type: str
+ sample: 78ad334a-3535-4036-85e6-8e11e745217b
+ name:
+ description: Name of condition.
+ returned: when state is present
+ type: str
+ sample: my_waf_condition
+ regex_match_set_id:
+ description: ID of the regex match set.
+ returned: when type is regex and state is present
+ type: str
+ sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
+ regex_match_tuples:
+ description: List of regex matches.
+ returned: when type is regex and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the regex match is applied.
+ type: complex
+ contains:
+ type:
+ description: The field name.
+ returned: when type is regex and state is present
+ type: str
+ sample: QUERY_STRING
+ regex_pattern_set_id:
+ description: ID of the regex pattern.
+ type: str
+ sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: NONE
+ size_constraint_set_id:
+ description: ID of the size constraint set.
+ returned: when type is size and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ size_constraints:
+ description: List of size constraints to apply.
+ returned: when type is size and state is present
+ type: complex
+ contains:
+ comparison_operator:
+ description: Comparison operator to apply.
+ type: str
+ sample: GT
+ field_to_match:
+ description: Field on which the size constraint is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name.
+ type: str
+ sample: QUERY_STRING
+ size:
+ description: Size to compare against the field.
+ type: int
+ sample: 300
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: NONE
+ sql_injection_match_set_id:
+ description: ID of the SQL injection match set.
+ returned: when type is sql and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ sql_injection_match_tuples:
+ description: List of SQL injection match sets.
+ returned: when type is sql and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the SQL injection match is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name.
+ type: str
+ sample: QUERY_STRING
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: URL_DECODE
+ xss_match_set_id:
+ description: ID of the XSS match set.
+ returned: when type is xss and state is present
+ type: str
+ sample: de84b4b3-578b-447e-a9a0-0db35c995656
+ xss_match_tuples:
+ description: List of XSS match sets.
+ returned: when type is xss and state is present
+ type: complex
+ contains:
+ field_to_match:
+ description: Field on which the XSS match is applied.
+ type: complex
+ contains:
+ type:
+ description: Field name
+ type: str
+ sample: QUERY_STRING
+ text_transformation:
+ description: Transformation applied to the text before matching.
+ type: str
+ sample: URL_DECODE
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP
+from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff
+
+
+class Condition(object):
+
+ def __init__(self, client, module):
+ self.client = client
+ self.module = module
+ self.type = module.params['type']
+ self.method_suffix = MATCH_LOOKUP[self.type]['method']
+ self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
+ self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
+ self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
+ self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
+ self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
+ self.conditiontype = MATCH_LOOKUP[self.type]['type']
+
+ def format_for_update(self, condition_set_id):
+ # Prep kwargs
+ kwargs = dict()
+ kwargs['Updates'] = list()
+
+ for filtr in self.module.params.get('filters'):
+ # Only for ip_set
+ if self.type == 'ip':
+ # there might be a better way of detecting an IPv6 address
+ if ':' in filtr.get('ip_address'):
+ ip_type = 'IPV6'
+ else:
+ ip_type = 'IPV4'
+ condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
+
+ # Specific for geo_match_set
+ if self.type == 'geo':
+ condition_insert = dict(Type='Country', Value=filtr.get('country'))
+
+ # Common For everything but ip_set and geo_match_set
+ if self.type not in ('ip', 'geo'):
+
+ condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
+ TextTransformation=filtr.get('transformation', 'none').upper())
+
+ if filtr.get('field_to_match').upper() == "HEADER":
+ if filtr.get('header'):
+ condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
+ else:
+ self.module.fail_json(msg="The 'header' filter key is required when field_to_match is 'header'")
+
+ # Specific for byte_match_set
+ if self.type == 'byte':
+ condition_insert['TargetString'] = filtr.get('target_string')
+ condition_insert['PositionalConstraint'] = filtr.get('position')
+
+ # Specific for size_constraint_set
+ if self.type == 'size':
+ condition_insert['ComparisonOperator'] = filtr.get('comparison')
+ condition_insert['Size'] = filtr.get('size')
+
+ # Specific for regex_match_set
+ if self.type == 'regex':
+ condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
+
+ kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
+
+ kwargs[self.conditionsetid] = condition_set_id
+ return kwargs
+
+ def format_for_deletion(self, condition):
+ return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
+ for current_condition_tuple in condition[self.conditiontuples]],
+ self.conditionsetid: condition[self.conditionsetid]}
+
+ @AWSRetry.exponential_backoff()
+ def list_regex_patterns_with_backoff(self, **params):
+ return self.client.list_regex_pattern_sets(**params)
+
+ @AWSRetry.exponential_backoff()
+ def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
+ return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
+
+ def list_regex_patterns(self):
+ # at the time of writing (2017-11-20) no regex pattern paginator exists
+ regex_patterns = []
+ params = {}
+ while True:
+ try:
+ response = self.list_regex_patterns_with_backoff(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list regex patterns')
+ regex_patterns.extend(response['RegexPatternSets'])
+ if 'NextMarker' in response:
+ params['NextMarker'] = response['NextMarker']
+ else:
+ break
+ return regex_patterns
+
+ def get_regex_pattern_by_name(self, name):
+ existing_regex_patterns = self.list_regex_patterns()
+ regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
+ if name in regex_lookup:
+ return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
+ else:
+ return None
+
+ def ensure_regex_pattern_present(self, regex_pattern):
+ name = regex_pattern['name']
+
+ pattern_set = self.get_regex_pattern_by_name(name)
+ if not pattern_set:
+ pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
+ self.client.create_regex_pattern_set)['RegexPatternSet']
+ missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
+ extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
+ if not missing and not extra:
+ return pattern_set
+ updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
+ updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
+ self.client.update_regex_pattern_set, wait=True)
+ return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
+
+ def delete_unused_regex_pattern(self, regex_pattern_set_id):
+ try:
+ regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
+ updates = list()
+ for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
+ updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
+ self.client.update_regex_pattern_set)
+
+ run_func_with_change_token_backoff(self.client, self.module,
+ {'RegexPatternSetId': regex_pattern_set_id},
+ self.client.delete_regex_pattern_set, wait=True)
+ except is_boto3_error_code('WAFNonexistentItemException'):
+ return
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg='Could not delete regex pattern')
+
+ def get_condition_by_name(self, name):
+ all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
+ if all_conditions:
+ return all_conditions[0][self.conditionsetid]
+
+ @AWSRetry.exponential_backoff()
+ def get_condition_by_id_with_backoff(self, condition_set_id):
+ params = dict()
+ params[self.conditionsetid] = condition_set_id
+ func = getattr(self.client, 'get_' + self.method_suffix)
+ return func(**params)[self.conditionset]
+
+ def get_condition_by_id(self, condition_set_id):
+ try:
+ return self.get_condition_by_id_with_backoff(condition_set_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not get condition')
+
+ def list_conditions(self):
+ method = 'list_' + self.method_suffix + 's'
+ try:
+ paginator = self.client.get_paginator(method)
+ func = paginator.paginate().build_full_result
+ except botocore.exceptions.OperationNotPageableError:
+ # list_geo_match_sets and list_regex_match_sets do not have a paginator
+ func = getattr(self.client, method)
+ try:
+ return func()[self.conditionsets]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
+
+ def tidy_up_regex_patterns(self, regex_match_set):
+ all_regex_match_sets = self.list_conditions()
+ all_match_set_patterns = list()
+ for rms in all_regex_match_sets:
+ all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
+ for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
+ for filtr in regex_match_set[self.conditiontuples]:
+ if filtr['RegexPatternSetId'] not in all_match_set_patterns:
+ self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
+
+ def find_condition_in_rules(self, condition_set_id):
+ rules_in_use = []
+ try:
+ if self.client.__class__.__name__ == 'WAF':
+ all_rules = list_rules_with_backoff(self.client)
+ elif self.client.__class__.__name__ == 'WAFRegional':
+ all_rules = list_regional_rules_with_backoff(self.client)
+ else:
+ self.module.fail_json(msg='Unexpected client type %s when listing rules' % self.client.__class__.__name__)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not list rules')
+ for rule in all_rules:
+ try:
+ rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not get rule details')
+ if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
+ rules_in_use.append(rule_details['Name'])
+ return rules_in_use
+
+ def find_and_delete_condition(self, condition_set_id):
+ current_condition = self.get_condition_by_id(condition_set_id)
+ in_use_rules = self.find_condition_in_rules(condition_set_id)
+ if in_use_rules:
+ rulenames = ', '.join(in_use_rules)
+ self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
+ if current_condition[self.conditiontuples]:
+ # Filters are deleted using update with the DELETE action
+ func = getattr(self.client, 'update_' + self.method_suffix)
+ params = self.format_for_deletion(current_condition)
+ try:
+ # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
+ run_func_with_change_token_backoff(self.client, self.module, params, func)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not delete filters from condition')
+ func = getattr(self.client, 'delete_' + self.method_suffix)
+ params = dict()
+ params[self.conditionsetid] = condition_set_id
+ try:
+ run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not delete condition')
+ # tidy up regex patterns
+ if self.type == 'regex':
+ self.tidy_up_regex_patterns(current_condition)
+ return True, {}
+
+ def find_missing(self, update, current_condition):
+ missing = []
+ for desired in update['Updates']:
+ found = False
+ desired_condition = desired[self.conditiontuple]
+ current_conditions = current_condition[self.conditiontuples]
+ for condition in current_conditions:
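+ # compare_policies returns True when the two documents differ,
+ # so a falsy result means the desired tuple already exists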
+ if not compare_policies(condition, desired_condition):
+ found = True
+ if not found:
+ missing.append(desired)
+ return missing
+
+ def find_and_update_condition(self, condition_set_id):
+ current_condition = self.get_condition_by_id(condition_set_id)
+ update = self.format_for_update(condition_set_id)
+ missing = self.find_missing(update, current_condition)
+ if self.module.params.get('purge_filters'):
+ extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
+ for current_tuple in current_condition[self.conditiontuples]
+ if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
+ else:
+ extra = []
+ changed = bool(missing or extra)
+ if changed:
+ update['Updates'] = missing + extra
+ func = getattr(self.client, 'update_' + self.method_suffix)
+ try:
+ result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not update condition')
+ return changed, self.get_condition_by_id(condition_set_id)
+
+ def ensure_condition_present(self):
+ name = self.module.params['name']
+ condition_set_id = self.get_condition_by_name(name)
+ if condition_set_id:
+ return self.find_and_update_condition(condition_set_id)
+ else:
+ params = dict()
+ params['Name'] = name
+ func = getattr(self.client, 'create_' + self.method_suffix)
+ try:
+ condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Could not create condition')
+ return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
+
+ def ensure_condition_absent(self):
+ condition_set_id = self.get_condition_by_name(self.module.params['name'])
+ if condition_set_id:
+ return self.find_and_delete_condition(condition_set_id)
+ return False, {}
+
+
+def main():
+ filters_subspec = dict(
+ country=dict(),
+ field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
+ header=dict(),
+ transformation=dict(choices=['none', 'compress_white_space',
+ 'html_entity_decode', 'lowercase',
+ 'cmd_line', 'url_decode']),
+ position=dict(choices=['exactly', 'starts_with', 'ends_with',
+ 'contains', 'contains_word']),
+ comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
+ target_string=dict(), # Bytes
+ size=dict(type='int'),
+ ip_address=dict(),
+ regex_pattern=dict(),
+ )
+ argument_spec = dict(
+ name=dict(required=True),
+ type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
+ filters=dict(type='list', elements='dict'),
+ purge_filters=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['filters']]])
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+
+ condition = Condition(client, module)
+
+ if state == 'present':
+ (changed, results) = condition.ensure_condition_present()
+ # return a condition agnostic ID for use by waf_rule
+ results['ConditionId'] = results[condition.conditionsetid]
+ else:
+ (changed, results) = condition.ensure_condition_absent()
+
+ module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/waf_info.py b/ansible_collections/community/aws/plugins/modules/waf_info.py
new file mode 100644
index 000000000..6a49a886e
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/waf_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: waf_info
+short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters
+version_added: 1.0.0
+description:
+ - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_info).
+ The usage did not change.
+options:
+ name:
+ description:
+ - The name of a Web Application Firewall.
+ type: str
+ waf_regional:
+ description: Whether to use the C(waf-regional) module.
+ default: false
+ required: false
+ type: bool
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: obtain all WAF information
+ community.aws.waf_info:
+
+- name: obtain all information for a single WAF
+ community.aws.waf_info:
+ name: test_waf
+
+- name: obtain all information for a single WAF Regional
+ community.aws.waf_info:
+ name: test_waf
+ waf_regional: true
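+
+# A sketch of registering the result and listing the rule names of one WAF
+# (the "waf_result" variable name is only illustrative).
+- name: obtain information for a single WAF and show its rule names
+ community.aws.waf_info:
+ name: test_waf
+ register: waf_result
+
+- name: show rule names
+ ansible.builtin.debug:
+ msg: "{{ waf_result.wafs[0].rules | map(attribute='name') | list }}"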
+'''
+
+RETURN = '''
+wafs:
+ description: The WAFs that match the passed arguments.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: A friendly name or description of the WebACL.
+ returned: always
+ type: str
+ sample: test_waf
+ default_action:
+ description: The action to perform if none of the Rules contained in the WebACL match.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this WebACL.
+ returned: always
+ type: str
+ sample: test_waf_metric
+ rules:
+ description: An array that contains the action and the priority for each Rule in a WebACL.
+ returned: always
+ type: complex
+ contains:
+ action:
+ description: The action to perform if the Rule matches.
+ returned: always
+ type: str
+ sample: BLOCK
+ metric_name:
+ description: A friendly name or description for the metrics for this Rule.
+ returned: always
+ type: str
+ sample: ipblockrule
+ name:
+ description: A friendly name or description of the Rule.
+ returned: always
+ type: str
+ sample: ip_block_rule
+ predicates:
+ description: The Predicates list contains a Predicate for each
+ ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
+ object in a Rule.
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890",
+ "byte_match_tuples": [
+ {
+ "field_to_match": {
+ "type": "QUERY_STRING"
+ },
+ "positional_constraint": "STARTS_WITH",
+ "target_string": "bobbins",
+ "text_transformation": "NONE"
+ }
+ ],
+ "name": "bobbins",
+ "negated": false,
+ "type": "ByteMatch"
+ }
+ ]
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ waf_regional=dict(type='bool', default=False)
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ web_acls = list_web_acls(client, module)
+ name = module.params['name']
+ if name:
+ web_acls = [web_acl for web_acl in web_acls if
+ web_acl['Name'] == name]
+ if not web_acls:
+ module.fail_json(msg="WAF named %s not found" % name)
+ module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
+ for web_acl in web_acls])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/waf_rule.py b/ansible_collections/community/aws/plugins/modules/waf_rule.py
new file mode 100644
index 000000000..a994b1831
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/waf_rule.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Will Thames
+# Copyright (c) 2015 Mike Mochan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: waf_rule
+short_description: Create and delete WAF Rules
+version_added: 1.0.0
+description:
+ - Read the AWS documentation for WAF
+ U(https://aws.amazon.com/documentation/waf/).
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_rule).
+ The usage did not change.
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+options:
+ name:
+ description: Name of the Web Application Firewall rule.
+ required: true
+ type: str
+ metric_name:
+ description:
+ - A friendly name or description for the metrics for the rule.
+ - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name may not contain whitespace.
+ - You can't change I(metric_name) after you create the rule.
+ - Defaults to the same as I(name) with disallowed characters removed.
+ type: str
+ state:
+ description: Whether the rule should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ conditions:
+ description: >
+ List of conditions used in the rule. M(community.aws.waf_condition) can be used to create new conditions.
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ required: true
+ type: str
+        choices: ['byte','geo','ip','regex','size','sql','xss']
+ description: The type of rule to match.
+ negated:
+ required: true
+ type: bool
+ description: Whether the condition should be negated.
+      name:
+ required: true
+ type: str
+ description: The name of the condition. The condition must already exist.
+ purge_conditions:
+ description:
+ - Whether or not to remove conditions that are not passed when updating I(conditions).
+ default: false
+ type: bool
+ waf_regional:
+    description: Whether to use the C(waf-regional) API instead of the global C(waf) API.
+ default: false
+ required: false
+ type: bool
+'''
+
+EXAMPLES = r'''
+ - name: create WAF rule
+ community.aws.waf_rule:
+ name: my_waf_rule
+ conditions:
+ - name: my_regex_condition
+ type: regex
+ negated: false
+ - name: my_geo_condition
+ type: geo
+ negated: false
+ - name: my_byte_condition
+ type: byte
+ negated: true
+
+ - name: remove WAF rule
+ community.aws.waf_rule:
+ name: "my_waf_rule"
+ state: absent
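+
+  # A sketch (not part of the original docs) showing condition purging:
+  - name: update WAF rule and drop any unlisted conditions
+    community.aws.waf_rule:
+      name: my_waf_rule
+      conditions:
+        - name: my_geo_condition
+          type: geo
+          negated: false
+      purge_conditions: true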
+'''
+
+RETURN = r'''
+rule:
+  description: WAF rule contents.
+ returned: always
+ type: complex
+ contains:
+ metric_name:
+ description: Metric name for the rule.
+ returned: always
+ type: str
+ sample: ansibletest1234rule
+ name:
+ description: Friendly name for the rule.
+ returned: always
+ type: str
+ sample: ansible-test-1234_rule
+ predicates:
+ description: List of conditions used in the rule.
+ returned: always
+ type: complex
+ contains:
+ data_id:
+ description: ID of the condition.
+ returned: always
+ type: str
+ sample: 8251acdb-526c-42a8-92bc-d3d13e584166
+ negated:
+ description: Whether the sense of the condition is negated.
+ returned: always
+ type: bool
+ sample: false
+ type:
+          description: Type of the condition.
+ returned: always
+ type: str
+ sample: ByteMatch
+ rule_id:
+ description: ID of the WAF rule.
+ returned: always
+ type: str
+ sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
+'''
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waf import (
+ MATCH_LOOKUP,
+ list_regional_rules_with_backoff,
+ list_rules_with_backoff,
+ run_func_with_change_token_backoff,
+ get_web_acl_with_backoff,
+ list_web_acls_with_backoff,
+ list_regional_web_acls_with_backoff,
+)
+
+
+def get_rule_by_name(client, module, name):
+ rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
+ if rules:
+ return rules[0]
+
+
+def get_rule(client, module, rule_id):
+ try:
+ return client.get_rule(RuleId=rule_id)['Rule']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get WAF rule')
+
+
+def list_rules(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ return list_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF rules')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ return list_regional_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF Regional rules')
+
+
+def list_regional_rules(client, module):
+ try:
+ return list_regional_rules_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list WAF rules')
+
+
+def find_and_update_rule(client, module, rule_id):
+ rule = get_rule(client, module, rule_id)
+ rule_id = rule['RuleId']
+
+ existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
+ desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
+ all_conditions = dict()
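+    # Build three lookups: the conditions currently attached to the rule
+    # (existing_conditions), the conditions requested by the task
+    # (desired_conditions), and every condition that exists in the account
+    # (all_conditions, keyed by both name and DataId).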
+
+ for condition_type in MATCH_LOOKUP:
+ method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
+ all_conditions[condition_type] = dict()
+ try:
+ paginator = client.get_paginator(method)
+ func = paginator.paginate().build_full_result
+ except (KeyError, botocore.exceptions.OperationNotPageableError):
+ # list_geo_match_sets and list_regex_match_sets do not have a paginator
+ # and throw different exceptions
+ func = getattr(client, method)
+ try:
+ pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
+ for pred in pred_results:
+ pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
+ all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
+ all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
+
+ for condition in module.params['conditions']:
+ desired_conditions[condition['type']][condition['name']] = condition
+
+ reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
+ for condition in rule['Predicates']:
+ existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
+
+ insertions = list()
+ deletions = list()
+
+ for condition_type in desired_conditions:
+ for (condition_name, condition) in desired_conditions[condition_type].items():
+ if condition_name not in all_conditions[condition_type]:
+ module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
+ condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
+ if condition['data_id'] not in existing_conditions[condition_type]:
+ insertions.append(format_for_insertion(condition))
+
+ if module.params['purge_conditions']:
+ for condition_type in existing_conditions:
+ deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
+ if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
+
+ changed = bool(insertions or deletions)
+ update = {
+ 'RuleId': rule_id,
+ 'Updates': insertions + deletions
+ }
+ if changed:
+ try:
+ run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update rule conditions')
+
+ return changed, get_rule(client, module, rule_id)
+
+
+def format_for_insertion(condition):
+ return dict(Action='INSERT',
+ Predicate=dict(Negated=condition['negated'],
+ Type=MATCH_LOOKUP[condition['type']]['type'],
+ DataId=condition['data_id']))
+
+
+def format_for_deletion(condition):
+ return dict(Action='DELETE',
+ Predicate=dict(Negated=condition['negated'],
+ Type=condition['type'],
+ DataId=condition['data_id']))
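+
+# For illustration (hypothetical IDs), an update entry produced by the helpers
+# above looks like:
+#   {'Action': 'INSERT',
+#    'Predicate': {'Negated': False,
+#                  'Type': 'ByteMatch',
+#                  'DataId': '8251acdb-526c-42a8-92bc-d3d13e584166'}}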
+
+
+def remove_rule_conditions(client, module, rule_id):
+ conditions = get_rule(client, module, rule_id)['Predicates']
+ updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
+ try:
+ run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not remove rule conditions')
+
+
+def ensure_rule_present(client, module):
+ name = module.params['name']
+ rule_id = get_rule_by_name(client, module, name)
+    if rule_id:
+        return find_and_update_rule(client, module, rule_id)
+    else:
+        metric_name = module.params['metric_name']
+        if not metric_name:
+            metric_name = re.sub(r'[^a-zA-Z0-9]', '', name)
+        params = {'Name': name, 'MetricName': metric_name}
+ try:
+ new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not create rule')
+ return find_and_update_rule(client, module, new_rule['RuleId'])
+
+
+def find_rule_in_web_acls(client, module, rule_id):
+ web_acls_in_use = []
+ try:
+ if client.__class__.__name__ == 'WAF':
+ all_web_acls = list_web_acls_with_backoff(client)
+ elif client.__class__.__name__ == 'WAFRegional':
+ all_web_acls = list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list Web ACLs')
+ for web_acl in all_web_acls:
+ try:
+ web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACL details')
+ if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
+ web_acls_in_use.append(web_acl_details['Name'])
+ return web_acls_in_use
+
+
+def ensure_rule_absent(client, module):
+ rule_id = get_rule_by_name(client, module, module.params['name'])
+ in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
+ if in_use_web_acls:
+ web_acl_names = ', '.join(in_use_web_acls)
+ module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
+ (module.params['name'], web_acl_names))
+ if rule_id:
+ remove_rule_conditions(client, module, rule_id)
+ try:
+ return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not delete rule')
+ return False, {}
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ metric_name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ conditions=dict(type='list', elements='dict'),
+ purge_conditions=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ if state == 'present':
+ (changed, results) = ensure_rule_present(client, module)
+ else:
+ (changed, results) = ensure_rule_absent(client, module)
+
+ module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/waf_web_acl.py b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py
new file mode 100644
index 000000000..9d5ad59e4
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: waf_web_acl
+short_description: Create and delete WAF Web ACLs
+version_added: 1.0.0
+description:
+  - Module for WAF Classic; for WAF v2 use the I(wafv2_*) modules.
+ - Read the AWS documentation for WAF U(https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html).
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_web_acl).
+ The usage did not change.
+
+author:
+ - Mike Mochan (@mmochan)
+ - Will Thames (@willthames)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+options:
+ name:
+ description: Name of the Web Application Firewall ACL to manage.
+ required: true
+ type: str
+ default_action:
+ description: The action that you want AWS WAF to take when a request doesn't
+ match the criteria specified in any of the Rule objects that are associated with the WebACL.
+ choices:
+ - block
+ - allow
+ - count
+ type: str
+ state:
+ description: Whether the Web ACL should be present or absent.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ metric_name:
+ description:
+ - A friendly name or description for the metrics for this WebACL.
+ - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
+ - You can't change I(metric_name) after you create the WebACL.
+ - Metric name will default to I(name) with disallowed characters stripped out.
+ type: str
+ rules:
+ description:
+ - A list of rules that the Web ACL will enforce.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: Name of the rule.
+ type: str
+ required: true
+ action:
+ description: The action to perform.
+ type: str
+ required: true
+ priority:
+ description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
+ type: int
+ required: true
+ type:
+ description: The type of rule.
+ choices:
+ - rate_based
+ - regular
+ type: str
+ purge_rules:
+ description:
+ - Whether to remove rules that aren't passed with I(rules).
+    default: false
+ type: bool
+ waf_regional:
+    description: Whether to use the C(waf-regional) API instead of the global C(waf) API.
+ default: false
+ required: false
+ type: bool
+'''
+
+EXAMPLES = r'''
+ - name: create web ACL
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ rules:
+ - name: my_rule
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: true
+ state: present
+
+ - name: delete the web acl
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ state: absent
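+
+  # A sketch (not part of the original docs) of managing a regional Web ACL:
+  - name: create regional web ACL
+    community.aws.waf_web_acl:
+      name: my_regional_acl
+      default_action: allow
+      rules:
+        - name: my_rule
+          priority: 1
+          action: count
+          type: regular
+      waf_regional: true
+      state: present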
+'''
+
+RETURN = r'''
+web_acl:
+  description: Contents of the Web ACL.
+ returned: always
+ type: complex
+ contains:
+ default_action:
+ description: Default action taken by the Web ACL if no rules match.
+ returned: always
+ type: dict
+ sample:
+ type: BLOCK
+ metric_name:
+ description: Metric name used as an identifier.
+ returned: always
+ type: str
+ sample: mywebacl
+ name:
+ description: Friendly name of the Web ACL.
+ returned: always
+ type: str
+ sample: my web acl
+ rules:
+ description: List of rules.
+ returned: always
+ type: complex
+ contains:
+ action:
+ description: Action taken by the WAF when the rule matches.
+ returned: always
+ type: complex
+ sample:
+ type: ALLOW
+ priority:
+          description: Priority number of the rule (lower numbers are evaluated first).
+ returned: always
+ type: int
+ sample: 2
+ rule_id:
+ description: Rule ID.
+ returned: always
+ type: str
+ sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
+ type:
+ description: Type of rule (either REGULAR or RATE_BASED).
+ returned: always
+ type: str
+ sample: REGULAR
+ web_acl_id:
+ description: Unique identifier of Web ACL.
+ returned: always
+ type: str
+ sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+import re
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waf import (
+ list_regional_rules_with_backoff,
+ list_regional_web_acls_with_backoff,
+ list_rules_with_backoff,
+ list_web_acls_with_backoff,
+ run_func_with_change_token_backoff,
+)
+
+
+def get_web_acl_by_name(client, module, name):
+    acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name]
+    if acls:
+        return acls[0]
+    return None
+
+
+def create_rule_lookup(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ rules = list_rules_with_backoff(client)
+ return dict((rule['Name'], rule) for rule in rules)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list rules')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ rules = list_regional_rules_with_backoff(client)
+ return dict((rule['Name'], rule) for rule in rules)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not list regional rules')
+
+
+def get_web_acl(client, module, web_acl_id):
+ try:
+ return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
+
+
+def list_web_acls(client, module):
+ if client.__class__.__name__ == 'WAF':
+ try:
+ return list_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACLs')
+ elif client.__class__.__name__ == 'WAFRegional':
+ try:
+ return list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not get Web ACLs')
+
+
+def find_and_update_web_acl(client, module, web_acl_id):
+ acl = get_web_acl(client, module, web_acl_id)
+ rule_lookup = create_rule_lookup(client, module)
+ existing_rules = acl['Rules']
+ desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
+ 'Priority': rule['priority'],
+ 'Action': {'Type': rule['action'].upper()},
+ 'Type': rule.get('type', 'regular').upper()}
+ for rule in module.params['rules']]
+ missing = [rule for rule in desired_rules if rule not in existing_rules]
+ extras = []
+ if module.params['purge_rules']:
+ extras = [rule for rule in existing_rules if rule not in desired_rules]
+
+ insertions = [format_for_update(rule, 'INSERT') for rule in missing]
+ deletions = [format_for_update(rule, 'DELETE') for rule in extras]
+ changed = bool(insertions + deletions)
+
+ # Purge rules before adding new ones in case a deletion shares the same
+ # priority as an insertion.
+ params = {
+ 'WebACLId': acl['WebACLId'],
+ 'DefaultAction': acl['DefaultAction']
+ }
+ change_tokens = []
+ if deletions:
+ try:
+ params['Updates'] = deletions
+ result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ change_tokens.append(result['ChangeToken'])
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=result['ChangeToken']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update Web ACL')
+ if insertions:
+ try:
+ params['Updates'] = insertions
+ result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ change_tokens.append(result['ChangeToken'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not update Web ACL')
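+    # Wait for every recorded change token to propagate before re-reading the ACL.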
+ if change_tokens:
+ for token in change_tokens:
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=token
+ )
+ if changed:
+ acl = get_web_acl(client, module, web_acl_id)
+ return changed, acl
+
+
+def format_for_update(rule, action):
+ return dict(
+ Action=action,
+ ActivatedRule=dict(
+ Priority=rule['Priority'],
+ RuleId=rule['RuleId'],
+ Action=dict(
+ Type=rule['Action']['Type']
+ )
+ )
+ )
+
+
+def remove_rules_from_web_acl(client, module, web_acl_id):
+ acl = get_web_acl(client, module, web_acl_id)
+ deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
+ try:
+ params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions}
+ run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not remove rule')
+
+
+def ensure_web_acl_present(client, module):
+ changed = False
+ result = None
+ name = module.params['name']
+ web_acl_id = get_web_acl_by_name(client, module, name)
+ if web_acl_id:
+ (changed, result) = find_and_update_web_acl(client, module, web_acl_id)
+ else:
+ metric_name = module.params['metric_name']
+ if not metric_name:
+ metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
+ default_action = module.params['default_action'].upper()
+ try:
+ params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
+ new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not create Web ACL')
+ (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
+ return changed, result
+
+
+def ensure_web_acl_absent(client, module):
+ web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
+ if web_acl_id:
+ web_acl = get_web_acl(client, module, web_acl_id)
+ if web_acl['Rules']:
+ remove_rules_from_web_acl(client, module, web_acl_id)
+ try:
+ run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
+ return True, {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Could not delete Web ACL')
+ return False, {}
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ default_action=dict(choices=['block', 'allow', 'count']),
+ metric_name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ rules=dict(type='list', elements='dict'),
+ purge_rules=dict(type='bool', default=False),
+ waf_regional=dict(type='bool', default=False)
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['state', 'present', ['default_action', 'rules']]])
+ state = module.params.get('state')
+
+ resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ client = module.client(resource)
+ if state == 'present':
+ (changed, results) = ensure_web_acl_present(client, module)
+ else:
+ (changed, results) = ensure_web_acl_absent(client, module)
+
+ module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py
new file mode 100644
index 000000000..7a9011e9b
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_ip_set
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Manage WAFv2 IP sets
+description:
+ - Create, modify and delete IP sets for WAFv2.
+options:
+ state:
+ description:
+      - Whether the IP set is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the IP set.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the IP set.
+ required: false
+ type: str
+ scope:
+ description:
+      - Specifies whether this is for an AWS CloudFront distribution or for a regional application,
+        such as API Gateway or an Application Load Balancer.
+ choices: ["CLOUDFRONT","REGIONAL"]
+ required: true
+ type: str
+ ip_address_version:
+ description:
+ - Specifies whether this is an IPv4 or an IPv6 IP set.
+ - Required when I(state=present).
+ choices: ["IPV4","IPV6"]
+ type: str
+ addresses:
+ description:
+ - Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in
+ Classless Inter-Domain Routing (CIDR) notation.
+ - Required when I(state=present).
+ - When I(state=absent) and I(addresses) is defined, only the given IP addresses will be removed
+ from the IP set. The entire IP set itself will stay present.
+ type: list
+ elements: str
+ purge_addresses:
+ description:
+ - When set to C(no), keep the existing addresses in place. Will modify and add, but will not delete.
+ default: true
+ type: bool
+
+notes:
+ - Support for I(purge_tags) was added in release 4.0.0.
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+ - amazon.aws.tags
+
+'''
+
+EXAMPLES = '''
+- name: test ip set
+ wafv2_ip_set:
+ name: test02
+ state: present
+ description: hallo eins
+ scope: REGIONAL
+ ip_address_version: IPV4
+ addresses:
+ - 8.8.8.8/32
+ - 8.8.4.4/32
+ tags:
+ A: B
+ C: D
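+
+# A sketch (not part of the original docs): remove individual addresses
+# while keeping the IP set itself.
+- name: remove single address from the ip set
+  wafv2_ip_set:
+    name: test02
+    state: absent
+    scope: REGIONAL
+    addresses:
+      - 8.8.4.4/32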
+'''
+
+RETURN = """
+addresses:
+  description: Current addresses of the IP set.
+  sample:
+    - 8.8.8.8/32
+    - 8.8.4.4/32
+  returned: Always, as long as the IP set exists.
+  type: list
+arn:
+  description: ARN of the IP set.
+  sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127"
+  type: str
+  returned: Always, as long as the IP set exists.
+description:
+  description: Description of the IP set.
+  sample: Some IP set description
+  returned: Always, as long as the IP set exists.
+  type: str
+ip_address_version:
+  description: IP version of the IP set.
+  sample: IPV4
+  type: str
+  returned: Always, as long as the IP set exists.
+name:
+  description: Name of the IP set.
+  sample: test02
+  returned: Always, as long as the IP set exists.
+  type: str
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
+
+
+class IpSet:
+ def __init__(self, wafv2, name, scope, fail_json_aws):
+ self.wafv2 = wafv2
+ self.name = name
+ self.scope = scope
+ self.fail_json_aws = fail_json_aws
+ self.existing_set, self.id, self.locktoken, self.arn = self.get_set()
+
+ def description(self):
+ return self.existing_set.get('Description')
+
+    def _format_set(self, ip_set):
+        if ip_set is None:
+            return None
+        return camel_dict_to_snake_dict(ip_set, ignore_list=['tags'])
+
+ def get(self):
+ return self._format_set(self.existing_set)
+
+ def remove(self):
+ try:
+ response = self.wafv2.delete_ip_set(
+ Name=self.name,
+ Scope=self.scope,
+ Id=self.id,
+ LockToken=self.locktoken
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.")
+ return {}
+
+ def create(self, description, ip_address_version, addresses, tags):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'IPAddressVersion': ip_address_version,
+ 'Addresses': addresses,
+ }
+
+ if description:
+ req_obj['Description'] = description
+
+ if tags:
+ req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+
+ try:
+ response = self.wafv2.create_ip_set(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to create wafv2 ip set.")
+
+ self.existing_set, self.id, self.locktoken, self.arn = self.get_set()
+ return self._format_set(self.existing_set)
+
+ def update(self, description, addresses):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'Id': self.id,
+ 'Addresses': addresses,
+ 'LockToken': self.locktoken
+ }
+
+ if description:
+ req_obj['Description'] = description
+
+ try:
+ response = self.wafv2.update_ip_set(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to update wafv2 ip set.")
+
+ self.existing_set, self.id, self.locktoken, self.arn = self.get_set()
+ return self._format_set(self.existing_set)
+
+ def get_set(self):
+ response = self.list()
+ existing_set = None
+ id = None
+ arn = None
+ locktoken = None
+ for item in response.get('IPSets'):
+ if item.get('Name') == self.name:
+ id = item.get('Id')
+ locktoken = item.get('LockToken')
+ arn = item.get('ARN')
+ if id:
+ try:
+ existing_set = self.wafv2.get_ip_set(
+ Name=self.name,
+ Scope=self.scope,
+ Id=id
+ ).get('IPSet')
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to get wafv2 ip set.")
+ tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
+ existing_set['tags'] = tags
+
+ return existing_set, id, locktoken, arn
+
+ def list(self, Nextmarker=None):
+ # there is currently no paginator for wafv2
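+        # Each call returns at most 'Limit' sets; when AWS returns a NextMarker
+        # the method recurses and merges the next page into this response's 'IPSets'.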
+ req_obj = {
+ 'Scope': self.scope,
+ 'Limit': 100
+ }
+ if Nextmarker:
+ req_obj['NextMarker'] = Nextmarker
+
+ try:
+ response = self.wafv2.list_ip_sets(**req_obj)
+ if response.get('NextMarker'):
+ response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets')
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to list wafv2 ip set.")
+
+ return response
+
+
+def compare(existing_set, addresses, purge_addresses, state):
+ diff = False
+ new_rules = []
+ existing_rules = existing_set.get('addresses')
+ if state == 'present':
+ if purge_addresses:
+ new_rules = addresses
+ if sorted(addresses) != sorted(existing_set.get('addresses')):
+ diff = True
+
+ else:
+ for requested_rule in addresses:
+ if requested_rule not in existing_rules:
+ diff = True
+ new_rules.append(requested_rule)
+
+ new_rules += existing_rules
+ else:
+ if purge_addresses and addresses:
+ for requested_rule in addresses:
+ if requested_rule in existing_rules:
+ diff = True
+ existing_rules.pop(existing_rules.index(requested_rule))
+ new_rules = existing_rules
+
+ return diff, new_rules
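+
+
+# Worked illustration of compare() (hypothetical data, not executed):
+#   existing = {'addresses': ['8.8.8.8/32', '8.8.4.4/32']}
+#   compare(existing, ['1.1.1.1/32'], purge_addresses=False, state='present')
+#     -> (True, ['1.1.1.1/32', '8.8.8.8/32', '8.8.4.4/32'])  # add, keep the rest
+#   compare(existing, ['8.8.8.8/32'], purge_addresses=True, state='absent')
+#     -> (True, ['8.8.4.4/32'])  # remove only the given address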
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
+ description=dict(type='str'),
+ ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']),
+ addresses=dict(type='list', elements='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ purge_addresses=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['ip_address_version', 'addresses']]]
+ )
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+ description = module.params.get("description")
+ ip_address_version = module.params.get("ip_address_version")
+ addresses = module.params.get("addresses")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ purge_addresses = module.params.get("purge_addresses")
+ check_mode = module.check_mode
+
+ wafv2 = module.client('wafv2')
+
+ change = False
+ retval = {}
+
+ ip_set = IpSet(wafv2, name, scope, module.fail_json_aws)
+
+ if state == 'present':
+
+ if ip_set.get():
+ tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode)
+ ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
+ description_updated = bool(description) and ip_set.description() != description
+ change = ips_updated or description_updated or tags_updated
+ retval = ip_set.get()
+ if module.check_mode:
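+                # In check mode nothing changes; 'change' already reports the pending diff.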
+ pass
+ elif ips_updated or description_updated:
+ retval = ip_set.update(
+ description=description,
+ addresses=addresses
+ )
+ elif tags_updated:
+ retval, id, locktoken, arn = ip_set.get_set()
+ else:
+ if not check_mode:
+ retval = ip_set.create(
+ description=description,
+ ip_address_version=ip_address_version,
+ addresses=addresses,
+ tags=tags
+ )
+ change = True
+
+ if state == 'absent':
+ if ip_set.get():
+            if addresses:
+                change, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
+                if change and not check_mode:
+                    retval = ip_set.update(
+                        description=description,
+                        addresses=addresses
+                    )
+ else:
+ if not check_mode:
+ retval = ip_set.remove()
+ change = True
+
+ module.exit_json(changed=change, **retval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py
new file mode 100644
index 000000000..b92c9a816
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_ip_set_info
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Get information about wafv2 ip sets
+description:
+ - Get information about existing wafv2 ip sets.
+options:
+ name:
+ description:
+ - The name of the IP set.
+ required: true
+ type: str
+ scope:
+ description:
+ - Specifies whether this is for an AWS CloudFront distribution or for a regional application.
+ choices: ["CLOUDFRONT","REGIONAL"]
+ required: true
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: test ip set
+ wafv2_ip_set_info:
+ name: test02
+ scope: REGIONAL
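+
+# A sketch (not part of the original docs) of registering and using the result:
+- name: get ip set and register the result
+  wafv2_ip_set_info:
+    name: test02
+    scope: REGIONAL
+  register: ip_set_info
+
+- name: show current addresses
+  ansible.builtin.debug:
+    msg: "{{ ip_set_info.addresses }}"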
+'''
+
+RETURN = """
+addresses:
+  description: Current addresses of the IP set.
+  sample:
+    - 8.8.8.8/32
+    - 8.8.4.4/32
+  returned: Always, as long as the IP set exists.
+  type: list
+arn:
+  description: ARN of the IP set.
+  sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127"
+  type: str
+  returned: Always, as long as the IP set exists.
+description:
+  description: Description of the IP set.
+  sample: Some IP set description
+  returned: Always, as long as the IP set exists.
+  type: str
+ip_address_version:
+  description: IP version of the IP set.
+  sample: IPV4
+  type: str
+  returned: Always, as long as the IP set exists.
+name:
+  description: Name of the IP set.
+  sample: test02
+  returned: Always, as long as the IP set exists.
+  type: str
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+
+
+def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None):
+ # there is currently no paginator for wafv2
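+    # Each call returns at most 'Limit' sets; when AWS returns a NextMarker the
+    # function recurses and merges the next page into this response's 'IPSets'.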
+ req_obj = {
+ 'Scope': scope,
+ 'Limit': 100
+ }
+ if Nextmarker:
+ req_obj['NextMarker'] = Nextmarker
+
+ try:
+ response = wafv2.list_ip_sets(**req_obj)
+ if response.get('NextMarker'):
+ response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets')
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 ip set")
+ return response
+
+
+def get_ip_set(wafv2, name, scope, id, fail_json_aws):
+ try:
+ response = wafv2.get_ip_set(
+ Name=name,
+ Scope=scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to get wafv2 ip set")
+ return response
+
+
+def main():
+
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+
+ wafv2 = module.client('wafv2')
+
+    # check if the ip set exists
+ response = list_ip_sets(wafv2, scope, module.fail_json_aws)
+
+ id = None
+
+ for item in response.get('IPSets'):
+ if item.get('Name') == name:
+ id = item.get('Id')
+ arn = item.get('ARN')
+
+ retval = {}
+ existing_set = None
+ if id:
+ existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws)
+ retval = camel_dict_to_snake_dict(existing_set.get('IPSet'))
+ retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {}
+ module.exit_json(**retval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py
new file mode 100644
index 000000000..527ee1087
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_resources
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Apply or remove WAFv2 web ACLs to/from AWS resources
+description:
+  - Apply or remove a WAFv2 web ACL to or from other AWS resources.
+options:
+ state:
+ description:
+      - Whether the web ACL association is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the web acl.
+ type: str
+ scope:
+ description:
+      - Scope of the WAFv2 web ACL.
+ choices: ["CLOUDFRONT","REGIONAL"]
+ type: str
+ arn:
+ description:
+      - ARN of the AWS resource (ALB, API Gateway or AppSync GraphQL API) to associate or disassociate.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: add test alb to waf string03
+ community.aws.wafv2_resources:
+ name: string03
+ scope: REGIONAL
+ state: present
+ arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"
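+
+# A sketch (not part of the original docs): remove the association again
+- name: remove test alb from waf string03
+  community.aws.wafv2_resources:
+    name: string03
+    scope: REGIONAL
+    state: absent
+    arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"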
+'''
+
+RETURN = """
+resource_arns:
+  description: Resources to which the web ACL is currently applied.
+  sample:
+    - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"
+  returned: Always, as long as the web ACL exists.
+  type: list
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
+
+
+def get_web_acl(wafv2, name, scope, id, fail_json_aws):
+ try:
+ response = wafv2.get_web_acl(
+ Name=name,
+ Scope=scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to get wafv2 web acl.")
+ return response
+
+
+def list_wafv2_resources(wafv2, arn, fail_json_aws):
+ try:
+ response = wafv2.list_resources_for_web_acl(
+ WebACLArn=arn
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 web acl.")
+ return response
+
+
+def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws):
+ try:
+ response = wafv2.associate_web_acl(
+ WebACLArn=waf_arn,
+ ResourceArn=arn
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to add wafv2 web acl.")
+ return response
+
+
+def remove_resources(wafv2, arn, fail_json_aws):
+ try:
+ response = wafv2.disassociate_web_acl(
+ ResourceArn=arn
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
+ return response
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ name=dict(type='str'),
+ scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']),
+ arn=dict(type='str', required=True)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['name', 'scope']]]
+ )
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+ arn = module.params.get("arn")
+ check_mode = module.check_mode
+
+ wafv2 = module.client('wafv2')
+
+ # check if web acl exists
+
+ response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws)
+
+ id = None
+ retval = {}
+ change = False
+
+ for item in response.get('WebACLs'):
+ if item.get('Name') == name:
+ id = item.get('Id')
+
+ if id:
+ existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
+ waf_arn = existing_acl.get('WebACL').get('ARN')
+
+ retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws)
+
+ if state == 'present':
+ if retval:
+ if arn not in retval.get('ResourceArns'):
+ change = True
+ if not check_mode:
+ retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws)
+
+ elif state == 'absent':
+ if retval:
+ if arn in retval.get('ResourceArns'):
+ change = True
+ if not check_mode:
+ retval = remove_resources(wafv2, arn, module.fail_json_aws)
+
+ module.exit_json(changed=change, **camel_dict_to_snake_dict(retval))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py
new file mode 100644
index 000000000..3a2a7b5dd
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_resources_info
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: List resources associated with a WAFv2 web ACL
+description:
+  - List the resources to which a WAFv2 web ACL is applied.
+options:
+ name:
+ description:
+      - The name of the WAFv2 web ACL of interest.
+ type: str
+ required: true
+ scope:
+ description:
+ - Scope of wafv2 web acl.
+ required: true
+ choices: ["CLOUDFRONT","REGIONAL"]
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: get web acl
+ community.aws.wafv2_resources_info:
+ name: string03
+ scope: REGIONAL
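+  register: acl_resources
+
+# A sketch (not part of the original docs) of using the registered result:
+- name: show the associated resource arns
+  ansible.builtin.debug:
+    msg: "{{ acl_resources.resource_arns | default([]) }}"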
+'''
+
+RETURN = """
+resource_arns:
+  description: Resources to which the web ACL is currently applied.
+  sample:
+    - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"
+  returned: Always, as long as the web ACL exists.
+  type: list
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
+
+
+def get_web_acl(wafv2, name, scope, id, fail_json_aws):
+ try:
+ response = wafv2.get_web_acl(
+ Name=name,
+ Scope=scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to get wafv2 web acl.")
+ return response
+
+
+def list_web_acls(wafv2, scope, fail_json_aws):
+ return wafv2_list_web_acls(wafv2, scope, fail_json_aws)
+
+
+def list_wafv2_resources(wafv2, arn, fail_json_aws):
+ try:
+ response = wafv2.list_resources_for_web_acl(
+ WebACLArn=arn
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to list wafv2 resources.")
+ return response
+
+
+def main():
+
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+
+ wafv2 = module.client('wafv2')
+ # check if web acl exists
+ response = list_web_acls(wafv2, scope, module.fail_json_aws)
+
+ id = None
+ retval = {}
+
+ for item in response.get('WebACLs'):
+ if item.get('Name') == name:
+ id = item.get('Id')
+
+ if id:
+ existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
+ arn = existing_acl.get('WebACL').get('ARN')
+
+ retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws))
+
+ module.exit_json(**retval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py
new file mode 100644
index 000000000..8e46853c8
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_rule_group
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Manage WAFv2 rule groups
+description:
+ - Create, modify and delete wafv2 rule groups.
+options:
+ state:
+ description:
+      - Whether the rule group is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the rule group.
+ required: true
+ type: str
+ rules:
+ description:
+      - The Rule statements used to identify the web requests that you want to allow, block, or count.
+      - Required when I(state=present).
+ type: list
+ elements: dict
+ scope:
+ description:
+ - Scope of wafv2 rule group.
+ required: true
+ choices: ["CLOUDFRONT","REGIONAL"]
+ type: str
+ description:
+ description:
+ - Description of wafv2 rule group.
+ type: str
+ sampled_requests:
+ description:
+      - Whether to enable sampled requests for the rule group.
+ type: bool
+ default: false
+ cloudwatch_metrics:
+ description:
+      - Enable CloudWatch metrics for the WAFv2 rule group.
+ type: bool
+ default: true
+ metric_name:
+ description:
+      - Name of the CloudWatch metric.
+      - If not given and I(cloudwatch_metrics) is enabled, the name of the rule group itself is used.
+ type: str
+ capacity:
+ description:
+      - Capacity of the WAFv2 rule group.
+      - Required when I(state=present).
+ type: int
+ purge_rules:
+ description:
+      - When set to C(no), keep the existing rules in place. Will modify and add, but will not delete.
+ default: true
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.tags
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: change description
+ community.aws.wafv2_rule_group:
+ name: test02
+ state: present
+ description: hallo eins zwei
+ scope: REGIONAL
+ capacity: 500
+ rules:
+ - name: eins
+ priority: 1
+ action:
+ allow: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: fsd
+ statement:
+ ip_set_reference_statement:
+ arn: "{{ IPSET.arn }}"
+ cloudwatch_metrics: true
+ tags:
+ A: B
+ C: D
+ register: out
+
+- name: add rule
+ community.aws.wafv2_rule_group:
+ name: test02
+ state: present
+ description: hallo eins zwei
+ scope: REGIONAL
+ capacity: 500
+ rules:
+ - name: eins
+ priority: 1
+ action:
+ allow: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: fsd
+ statement:
+ ip_set_reference_statement:
+ arn: "{{ IPSET.arn }}"
+ - name: zwei
+ priority: 2
+ action:
+ block: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: ddos
+ statement:
+ or_statement:
+ statements:
+ - byte_match_statement:
+ search_string: ansible.com
+ positional_constraint: CONTAINS
+ field_to_match:
+ single_header:
+ name: host
+ text_transformations:
+ - type: LOWERCASE
+ priority: 0
+ - xss_match_statement:
+ field_to_match:
+ body: {}
+ text_transformations:
+ - type: NONE
+ priority: 0
+ cloudwatch_metrics: true
+ tags:
+ A: B
+ C: D
+ register: out
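+
+# A sketch (not part of the original docs): delete the rule group
+- name: remove rule group
+  community.aws.wafv2_rule_group:
+    name: test02
+    state: absent
+    scope: REGIONAL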
+'''
+
+RETURN = """
+arn:
+ description: Rule group arn
+ sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7
+ type: str
+ returned: Always, as long as the web acl exists
+description:
+ description: Description of the rule group
+ sample: Some rule group description
+ returned: Always, as long as the web acl exists
+ type: str
+capacity:
+ description: Current capacity of the rule group
+ sample: 500
+ returned: Always, as long as the rule group exists
+ type: int
+name:
+ description: Rule group name
+ sample: test02
+ returned: Always, as long as the rule group exists
+ type: str
+rules:
+ description: Current rules of the rule group
+ returned: Always, as long as the rule group exists
+ type: list
+ sample:
+ - action:
+ allow: {}
+ name: eins
+ priority: 1
+ statement:
+ ip_set_reference_statement:
+ arn: arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a
+ visibility_config:
+ cloud_watch_metrics_enabled: True
+ metric_name: fsd
+ sampled_requests_enabled: True
+visibility_config:
+ description: Visibility config of the rule group
+ returned: Always, as long as the rule group exists
+ type: dict
+ sample:
+ cloud_watch_metrics_enabled: True
+ metric_name: blub
+ sampled_requests_enabled: False
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
+
+
+class RuleGroup:
+ def __init__(self, wafv2, name, scope, fail_json_aws):
+ self.wafv2 = wafv2
+ self.id = None
+ self.name = name
+ self.scope = scope
+ self.fail_json_aws = fail_json_aws
+ self.existing_group = self.get_group()
+
+ def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'Id': self.id,
+ 'Rules': rules,
+ 'LockToken': self.locktoken,
+ 'VisibilityConfig': {
+ 'SampledRequestsEnabled': sampled_requests,
+ 'CloudWatchMetricsEnabled': cloudwatch_metrics,
+ 'MetricName': metric_name
+ }
+ }
+
+ if description:
+ req_obj['Description'] = description
+
+ try:
+ response = self.wafv2.update_rule_group(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to update wafv2 rule group.")
+ return self.refresh_group()
+
+ def get_group(self):
+ if self.id is None:
+ response = self.list()
+
+ for item in response.get('RuleGroups'):
+ if item.get('Name') == self.name:
+ self.id = item.get('Id')
+ self.locktoken = item.get('LockToken')
+ self.arn = item.get('ARN')
+
+ return self.refresh_group()
+
+ def refresh_group(self):
+ existing_group = None
+ if self.id:
+ try:
+ response = self.wafv2.get_rule_group(
+ Name=self.name,
+ Scope=self.scope,
+ Id=self.id
+ )
+ existing_group = response.get('RuleGroup')
+ self.locktoken = response.get('LockToken')
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to get wafv2 rule group.")
+
+ tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws)
+ existing_group['tags'] = tags or {}
+
+ return existing_group
+
+ def list(self):
+ return wafv2_list_rule_groups(self.wafv2, self.scope, self.fail_json_aws)
+
+ def get(self):
+ if self.existing_group:
+ return self.existing_group
+ return None
+
+ def remove(self):
+ try:
+ response = self.wafv2.delete_rule_group(
+ Name=self.name,
+ Scope=self.scope,
+ Id=self.id,
+ LockToken=self.locktoken
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.")
+ return response
+
+ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'Capacity': capacity,
+ 'Rules': rules,
+ 'VisibilityConfig': {
+ 'SampledRequestsEnabled': sampled_requests,
+ 'CloudWatchMetricsEnabled': cloudwatch_metrics,
+ 'MetricName': metric_name
+ }
+ }
+
+ if description:
+ req_obj['Description'] = description
+
+ if tags:
+ req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+
+ try:
+ response = self.wafv2.create_rule_group(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to create wafv2 rule group.")
+
+ self.existing_group = self.get_group()
+
+ return self.existing_group
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
+ capacity=dict(type='int'),
+ description=dict(type='str'),
+ rules=dict(type='list', elements='dict'),
+ sampled_requests=dict(type='bool', default=False),
+ cloudwatch_metrics=dict(type='bool', default=True),
+ metric_name=dict(type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ purge_rules=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['capacity', 'rules']]]
+ )
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+ capacity = module.params.get("capacity")
+ description = module.params.get("description")
+ rules = module.params.get("rules")
+ sampled_requests = module.params.get("sampled_requests")
+ cloudwatch_metrics = module.params.get("cloudwatch_metrics")
+ metric_name = module.params.get("metric_name")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ purge_rules = module.params.get("purge_rules")
+ check_mode = module.check_mode
+
+ if rules:
+ rules = []
+ for rule in module.params.get("rules"):
+ rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True)))
+
+ if not metric_name:
+ metric_name = name
+
+ wafv2 = module.client('wafv2')
+ rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws)
+
+ change = False
+ retval = {}
+
+ if state == 'present':
+ if rule_group.get():
+ tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags,
+ module.fail_json_aws, module.check_mode)
+ rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state)
+ description_change = bool(description) and (rule_group.get().get('Description') != description)
+ change = tagging_change or rules_change or description_change
+ retval = rule_group.get()
+ if module.check_mode:
+ # In check mode nothing changes...
+ pass
+ elif rules_change or description_change:
+ retval = rule_group.update(
+ description,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name
+ )
+ elif tagging_change:
+ retval = rule_group.refresh_group()
+
+ else:
+ change = True
+ if not check_mode:
+ retval = rule_group.create(
+ capacity,
+ description,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name,
+ tags
+ )
+
+ elif state == 'absent':
+ if rule_group.get():
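+            # With state=absent and an explicit rule list, only the listed
+            # rules are removed (the group is updated); without rules, the
+            # whole rule group is deleted.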
+            if rules:
+                change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state)
+                if change and not check_mode:
+                    retval = rule_group.update(
+                        description,
+                        rules,
+                        sampled_requests,
+                        cloudwatch_metrics,
+                        metric_name
+                    )
+            else:
+                change = True
+                if not check_mode:
+                    retval = rule_group.remove()
+
+ module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py
new file mode 100644
index 000000000..a42bea0c2
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_rule_group_info
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Get information about WAFv2 rule groups
+description:
+  - Get information about existing WAFv2 rule groups.
+options:
+ state:
+ description:
+      - This option does nothing, has been deprecated, and will be removed in release 6.0.0 of this collection.
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the rule group.
+ required: true
+ type: str
+ scope:
+ description:
+ - Scope of wafv2 rule group.
+ required: true
+ choices: ["CLOUDFRONT","REGIONAL"]
+ type: str
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: rule group info
+  community.aws.wafv2_rule_group_info:
+    name: test02
+    scope: REGIONAL
+  register: out
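+
+# A minimal follow-up sketch: the registered variable exposes the RETURN
+# values documented below (for example C(arn) and C(capacity)).
+- name: Print the rule group arn
+  ansible.builtin.debug:
+    msg: "{{ out.arn }}"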
+'''
+
+RETURN = """
+arn:
+ description: Rule group arn
+ sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7
+ type: str
+  returned: Always, as long as the rule group exists
+description:
+ description: Description of the rule group
+ sample: Some rule group description
+  returned: Always, as long as the rule group exists
+ type: str
+capacity:
+ description: Current capacity of the rule group
+ sample: 500
+ returned: Always, as long as the rule group exists
+ type: int
+name:
+ description: Rule group name
+ sample: test02
+ returned: Always, as long as the rule group exists
+ type: str
+rules:
+ description: Current rules of the rule group
+ returned: Always, as long as the rule group exists
+ type: list
+ sample:
+ - action:
+ allow: {}
+ name: eins
+ priority: 1
+ statement:
+ ip_set_reference_statement:
+ arn: arn:aws:wafv2:eu-central-1:111111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a
+ visibility_config:
+ cloud_watch_metrics_enabled: True
+ metric_name: fsd
+ sampled_requests_enabled: True
+visibility_config:
+ description: Visibility config of the rule group
+ returned: Always, as long as the rule group exists
+ type: dict
+ sample:
+ cloud_watch_metrics_enabled: True
+ metric_name: blub
+ sampled_requests_enabled: False
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+
+
+def get_rule_group(wafv2, name, scope, id, fail_json_aws):
+ try:
+ response = wafv2.get_rule_group(
+ Name=name,
+ Scope=scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to get wafv2 rule group.")
+ return response
+
+
+def main():
+ arg_spec = dict(
+ state=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+
+ wafv2 = module.client('wafv2')
+
+ if state:
+ module.deprecate(
+ 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',
+ version='6.0.0', collection_name='community.aws')
+
+ # check if rule group exists
+ response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)
+ id = None
+ retval = {}
+
+ for item in response.get('RuleGroups'):
+ if item.get('Name') == name:
+ id = item.get('Id')
+ arn = item.get('ARN')
+
+ existing_group = None
+ if id:
+ existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws)
+ retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup'))
+ tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
+ retval['tags'] = tags or {}
+
+ module.exit_json(**retval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py
new file mode 100644
index 000000000..f91fe64e6
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py
@@ -0,0 +1,583 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_web_acl
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Create, modify and delete WAF Web ACLs
+description:
+ - Create, modify or delete AWS WAF v2 web ACLs (not for classic WAF).
+ - See docs at U(https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html)
+options:
+ state:
+ description:
+ - Whether the rule is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the web acl.
+ required: true
+ type: str
+ scope:
+ description:
+      - Scope of the web acl; C(CLOUDFRONT) applies to Amazon CloudFront distributions, C(REGIONAL) to regional AWS resources.
+ required: true
+ choices: ["CLOUDFRONT", "REGIONAL"]
+ type: str
+ description:
+ description:
+ - Description of wafv2 web acl.
+ type: str
+ default_action:
+ description:
+ - Default action of the wafv2 web acl.
+ choices: ["Block", "Allow"]
+ type: str
+ sampled_requests:
+ description:
+ - Whether to store a sample of the web requests, true or false.
+ type: bool
+ default: false
+ cloudwatch_metrics:
+ description:
+ - Enable cloudwatch metric for wafv2 web acl.
+ type: bool
+ default: true
+ metric_name:
+ description:
+ - Name of cloudwatch metrics.
+      - If not given and I(cloudwatch_metrics) is enabled, the name of the web acl itself will be used.
+ type: str
+ rules:
+ description:
+ - The Rule statements used to identify the web requests that you want to allow, block, or count.
+ - For a list of managed rules see U(https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+          - The name of the wafv2 rule.
+ type: str
+ priority:
+ description:
+          - The rule priority.
+ type: int
+ action:
+ description:
+          - Whether a rule is blocked, allowed or counted.
+ type: dict
+ visibility_config:
+ description:
+ - Visibility of single wafv2 rule.
+ type: dict
+ statement:
+ description:
+ - Rule configuration.
+ type: dict
+ custom_response_bodies:
+ description:
+      - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules
+        by providing the key of the body dictionary element.
+      - Each element must have a unique dict key, and the dict must contain the two keys I(content_type) and I(content).
+ - Requires botocore >= 1.20.40
+ type: dict
+ version_added: 3.1.0
+ purge_rules:
+ description:
+      - When set to C(no), keep the existing web ACL rules in place. Rules will be modified and added, but never deleted.
+ default: true
+ type: bool
+
+notes:
+ - Support for the I(purge_tags) parameter was added in release 4.0.0.
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Create test web acl
+ community.aws.wafv2_web_acl:
+ name: test05
+ description: hallo eins
+ scope: REGIONAL
+ default_action: Allow
+ sampled_requests: false
+ cloudwatch_metrics: true
+ metric_name: test05-acl-metric
+ rules:
+ - name: zwei
+ priority: 0
+ action:
+ block: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: ddos
+ statement:
+ xss_match_statement:
+ field_to_match:
+ body: {}
+ text_transformations:
+ - type: NONE
+ priority: 0
+ - name: admin_protect
+ priority: 1
+ override_action:
+ none: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: fsd
+ statement:
+ managed_rule_group_statement:
+ vendor_name: AWS
+ name: AWSManagedRulesAdminProtectionRuleSet
+
+ # AWS Managed Bad Input Rule Set
+ # but allow PROPFIND_METHOD used e.g. by webdav
+ - name: bad_input_protect_whitelist_webdav
+ priority: 2
+ override_action:
+ none: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: bad_input_protect
+ statement:
+ managed_rule_group_statement:
+ vendor_name: AWS
+ name: AWSManagedRulesKnownBadInputsRuleSet
+ excluded_rules:
+ - name: PROPFIND_METHOD
+
+ # Rate Limit example. 1500 req/5min
+ # counted for two domains via or_statement. login.mydomain.tld and api.mydomain.tld
+ - name: rate_limit_example
+ priority: 3
+ action:
+ block: {}
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: mydomain-ratelimit
+ statement:
+ rate_based_statement:
+ limit: 1500
+ aggregate_key_type: IP
+ scope_down_statement:
+ or_statement:
+ statements:
+ - byte_match_statement:
+ search_string: login.mydomain.tld
+ positional_constraint: CONTAINS
+ field_to_match:
+ single_header:
+ name: host
+ text_transformations:
+ - type: LOWERCASE
+ priority: 0
+                  - byte_match_statement:
+ search_string: api.mydomain.tld
+ positional_constraint: CONTAINS
+ field_to_match:
+ single_header:
+ name: host
+ text_transformations:
+ - type: LOWERCASE
+ priority: 0
+ purge_rules: true
+ tags:
+ A: B
+ C: D
+ state: present
+
+- name: Create IP filtering web ACL
+ community.aws.wafv2_web_acl:
+ name: ip-filtering-traffic
+ description: ACL that filters web traffic based on rate limits and whitelists some IPs
+ scope: REGIONAL
+ default_action: Allow
+ sampled_requests: true
+ cloudwatch_metrics: true
+ metric_name: ip-filtering-traffic
+ rules:
+ - name: whitelist-own-IPs
+ priority: 0
+ action:
+ allow: {}
+ statement:
+ ip_set_reference_statement:
+ arn: 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123'
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: waf-acl-rule-whitelist-own-IPs
+ - name: rate-limit-per-IP
+ priority: 1
+ action:
+ block:
+ custom_response:
+ response_code: 429
+ custom_response_body_key: too_many_requests
+ statement:
+ rate_based_statement:
+ limit: 5000
+ aggregate_key_type: IP
+ visibility_config:
+ sampled_requests_enabled: true
+ cloud_watch_metrics_enabled: true
+ metric_name: waf-acl-rule-rate-limit-per-IP
+ purge_rules: true
+ custom_response_bodies:
+ too_many_requests:
+ content_type: APPLICATION_JSON
+ content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }'
+ region: us-east-1
+ state: present
+
+'''
+
+RETURN = """
+arn:
+ description: web acl arn
+ sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61
+ type: str
+ returned: Always, as long as the web acl exists
+description:
+ description: Description of the web acl
+ sample: Some web acl description
+ returned: Always, as long as the web acl exists
+ type: str
+capacity:
+ description: Current capacity of the web acl
+ sample: 140
+ returned: Always, as long as the web acl exists
+ type: int
+name:
+ description: Web acl name
+ sample: test02
+ returned: Always, as long as the web acl exists
+ type: str
+default_action:
+ description: Default action of ACL
+ returned: Always, as long as the web acl exists
+ sample:
+ allow: {}
+ type: dict
+rules:
+ description: Current rules of the web acl
+ returned: Always, as long as the web acl exists
+ type: list
+ sample:
+ - name: admin_protect
+ override_action:
+ none: {}
+ priority: 1
+ statement:
+ managed_rule_group_statement:
+ name: AWSManagedRulesAdminProtectionRuleSet
+ vendor_name: AWS
+ visibility_config:
+ cloud_watch_metrics_enabled: true
+ metric_name: admin_protect
+ sampled_requests_enabled: true
+custom_response_bodies:
+ description: Custom response body configurations to be used in rules
+ type: dict
+ sample:
+ too_many_requests:
+ content_type: APPLICATION_JSON
+ content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }'
+ returned: Always, as long as the web acl exists
+visibility_config:
+ description: Visibility config of the web acl
+ returned: Always, as long as the web acl exists
+ type: dict
+ sample:
+ cloud_watch_metrics_enabled: true
+ metric_name: blub
+ sampled_requests_enabled: false
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict
+
+
+class WebACL:
+ def __init__(self, wafv2, name, scope, fail_json_aws):
+ self.wafv2 = wafv2
+ self.name = name
+ self.scope = scope
+ self.fail_json_aws = fail_json_aws
+ self.existing_acl, self.id, self.locktoken = self.get_web_acl()
+
+ def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'Id': self.id,
+ 'DefaultAction': default_action,
+ 'Rules': rules,
+ 'VisibilityConfig': {
+ 'SampledRequestsEnabled': sampled_requests,
+ 'CloudWatchMetricsEnabled': cloudwatch_metrics,
+ 'MetricName': metric_name
+ },
+ 'LockToken': self.locktoken
+ }
+
+ if description:
+ req_obj['Description'] = description
+
+ if custom_response_bodies:
+ req_obj['CustomResponseBodies'] = custom_response_bodies
+
+ try:
+ response = self.wafv2.update_web_acl(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to update wafv2 web acl.")
+
+ self.existing_acl, self.id, self.locktoken = self.get_web_acl()
+ return self.existing_acl
+
+ def remove(self):
+ try:
+ response = self.wafv2.delete_web_acl(
+ Name=self.name,
+ Scope=self.scope,
+ Id=self.id,
+ LockToken=self.locktoken
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
+ return response
+
+ def get(self):
+ if self.existing_acl:
+ return self.existing_acl
+ return None
+
+ def get_web_acl(self):
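+        # A web ACL cannot be fetched by name alone: list all ACLs in the
+        # scope, match on the name to recover Id, LockToken and ARN, then
+        # fetch the full configuration.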
+ id = None
+ locktoken = None
+ arn = None
+ existing_acl = None
+ response = self.list()
+
+ for item in response.get('WebACLs'):
+ if item.get('Name') == self.name:
+ id = item.get('Id')
+ locktoken = item.get('LockToken')
+ arn = item.get('ARN')
+
+ if id:
+ try:
+ existing_acl = self.wafv2.get_web_acl(
+ Name=self.name,
+ Scope=self.scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to get wafv2 web acl.")
+ tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
+ existing_acl['tags'] = tags
+ return existing_acl, id, locktoken
+
+ def list(self):
+ return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws)
+
+ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies):
+ req_obj = {
+ 'Name': self.name,
+ 'Scope': self.scope,
+ 'DefaultAction': default_action,
+ 'Rules': rules,
+ 'VisibilityConfig': {
+ 'SampledRequestsEnabled': sampled_requests,
+ 'CloudWatchMetricsEnabled': cloudwatch_metrics,
+ 'MetricName': metric_name
+ }
+ }
+
+ if custom_response_bodies:
+ req_obj['CustomResponseBodies'] = custom_response_bodies
+ if description:
+ req_obj['Description'] = description
+ if tags:
+ req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+
+ try:
+ response = self.wafv2.create_web_acl(**req_obj)
+ except (BotoCoreError, ClientError) as e:
+ self.fail_json_aws(e, msg="Failed to create wafv2 web acl.")
+
+ self.existing_acl, self.id, self.locktoken = self.get_web_acl()
+ return self.existing_acl
+
+
+def format_result(result):
+
+    # We were returning details of the Web ACL inside a "web_acl" parameter
+    # on creation. Keep returning it to avoid breaking existing playbooks,
+    # but also return what the docs said we return (and what was returned
+    # when no change happened).
+ retval = dict(result)
+ if "WebACL" in retval:
+ retval.update(retval["WebACL"])
+
+ return camel_dict_to_snake_dict(retval, ignore_list=['tags'])
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
+ description=dict(type='str'),
+ default_action=dict(type='str', choices=['Block', 'Allow']),
+ rules=dict(type='list', elements='dict'),
+ sampled_requests=dict(type='bool', default=False),
+ cloudwatch_metrics=dict(type='bool', default=True),
+ metric_name=dict(type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ custom_response_bodies=dict(type='dict'),
+ purge_rules=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['default_action', 'rules']]]
+ )
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+ description = module.params.get("description")
+ default_action = module.params.get("default_action")
+ rules = module.params.get("rules")
+ sampled_requests = module.params.get("sampled_requests")
+ cloudwatch_metrics = module.params.get("cloudwatch_metrics")
+ metric_name = module.params.get("metric_name")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ purge_rules = module.params.get("purge_rules")
+ check_mode = module.check_mode
+
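+    # Custom response bodies require botocore >= 1.20.40; each body dict is
+    # camelized here because the API expects ContentType/Content keys.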
+ custom_response_bodies = module.params.get("custom_response_bodies")
+ if custom_response_bodies:
+ module.require_botocore_at_least('1.20.40', reason='to set custom response bodies')
+ custom_response_bodies = {}
+
+ for custom_name, body in module.params.get("custom_response_bodies").items():
+ custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True)
+
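+    # Map the plain Block/Allow choice onto the structured DefaultAction
+    # object the API expects.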
+ if default_action == 'Block':
+ default_action = {'Block': {}}
+ elif default_action == 'Allow':
+ default_action = {'Allow': {}}
+
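+    # The module accepts snake_case rule definitions; rebuild them in the
+    # CamelCase form the WAFv2 API expects.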
+ if rules:
+ rules = []
+ for rule in module.params.get("rules"):
+ rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True)))
+
+ if not metric_name:
+ metric_name = name
+
+ wafv2 = module.client('wafv2')
+ web_acl = WebACL(wafv2, name, scope, module.fail_json_aws)
+ change = False
+ retval = {}
+
+ if state == 'present':
+ if web_acl.get():
+ tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode)
+ change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
+ change = change or (description and web_acl.get().get('WebACL').get('Description') != description)
+ change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action)
+
+ if change and not check_mode:
+ retval = web_acl.update(
+ default_action,
+ description,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name,
+ custom_response_bodies
+ )
+ elif tags_changed:
+ retval, id, locktoken = web_acl.get_web_acl()
+ else:
+ retval = web_acl.get()
+
+ change |= tags_changed
+
+ else:
+ change = True
+ if not check_mode:
+ retval = web_acl.create(
+ default_action,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name,
+ tags,
+ description,
+ custom_response_bodies
+ )
+
+ elif state == 'absent':
+ if web_acl.get():
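+            # With state=absent and an explicit rule list, only the listed
+            # rules are removed (the web ACL is updated); without rules, the
+            # whole web ACL is deleted.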
+            if rules:
+                change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
+                if change and not check_mode:
+                    retval = web_acl.update(
+                        default_action,
+                        description,
+                        rules,
+                        sampled_requests,
+                        cloudwatch_metrics,
+                        metric_name,
+                        custom_response_bodies
+                    )
+            else:
+                change = True
+                if not check_mode:
+                    retval = web_acl.remove()
+
+ module.exit_json(changed=change, **format_result(retval))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py
new file mode 100644
index 000000000..13be05db5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: wafv2_web_acl_info
+version_added: 1.5.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Get information about a WAFv2 web ACL
+description:
+  - Get information about an existing WAFv2 web ACL.
+options:
+ name:
+ description:
+ - The name of the web acl.
+ required: true
+ type: str
+ scope:
+ description:
+ - Scope of wafv2 web acl.
+ required: true
+ choices: ["CLOUDFRONT", "REGIONAL"]
+ type: str
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: get web acl
+ community.aws.wafv2_web_acl_info:
+ name: test05
+ scope: REGIONAL
+ register: out
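+
+# A minimal follow-up sketch: the registered variable exposes the RETURN
+# values documented below (for example C(arn) and C(rules)).
+- name: Print the web acl arn
+  ansible.builtin.debug:
+    msg: "{{ out.arn }}"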
+'''
+
+RETURN = """
+arn:
+ description: web acl arn
+ sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61
+ type: str
+ returned: Always, as long as the web acl exists
+description:
+ description: Description of the web acl
+ sample: Some web acl description
+ returned: Always, as long as the web acl exists
+ type: str
+capacity:
+ description: Current capacity of the web acl
+ sample: 140
+ returned: Always, as long as the web acl exists
+ type: int
+name:
+ description: Web acl name
+ sample: test02
+ returned: Always, as long as the web acl exists
+ type: str
+rules:
+ description: Current rules of the web acl
+ returned: Always, as long as the web acl exists
+ type: list
+ sample:
+ - name: admin_protect
+ override_action:
+ none: {}
+ priority: 1
+ statement:
+ managed_rule_group_statement:
+ name: AWSManagedRulesAdminProtectionRuleSet
+ vendor_name: AWS
+ visibility_config:
+ cloud_watch_metrics_enabled: true
+ metric_name: admin_protect
+ sampled_requests_enabled: true
+visibility_config:
+ description: Visibility config of the web acl
+ returned: Always, as long as the web acl exists
+ type: dict
+ sample:
+ cloud_watch_metrics_enabled: true
+ metric_name: blub
+ sampled_requests_enabled: false
+"""
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
+
+
+def get_web_acl(wafv2, name, scope, id, fail_json_aws):
+ try:
+ response = wafv2.get_web_acl(
+ Name=name,
+ Scope=scope,
+ Id=id
+ )
+ except (BotoCoreError, ClientError) as e:
+ fail_json_aws(e, msg="Failed to get wafv2 web acl.")
+ return response
+
+
+def main():
+
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params.get("name")
+ scope = module.params.get("scope")
+
+ wafv2 = module.client('wafv2')
+ # check if web acl exists
+ response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws)
+
+ id = None
+ arn = None
+ retval = {}
+
+ for item in response.get('WebACLs'):
+ if item.get('Name') == name:
+ id = item.get('Id')
+ arn = item.get('ARN')
+
+ if id:
+ existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
+ retval = camel_dict_to_snake_dict(existing_acl.get('WebACL'))
+ tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
+ retval['tags'] = tags
+
+ module.exit_json(**retval)
+
+
+if __name__ == '__main__':
+ main()